From 506fc4de1e820d97b637f6e01dda2ab97667efa7 Mon Sep 17 00:00:00 2001 From: Trevor Norris Date: Wed, 22 May 2013 12:56:12 -0700 Subject: v8: upgrade to v3.19.3 --- deps/v8/ChangeLog | 27 + deps/v8/build/common.gypi | 10 +- deps/v8/include/v8.h | 458 +++++- deps/v8/src/api.cc | 435 +++-- deps/v8/src/api.h | 2 - deps/v8/src/apiutils.h | 25 - deps/v8/src/arguments.cc | 195 +++ deps/v8/src/arguments.h | 261 ++- deps/v8/src/arm/assembler-arm.cc | 20 +- deps/v8/src/arm/assembler-arm.h | 20 + deps/v8/src/arm/builtins-arm.cc | 49 +- deps/v8/src/arm/code-stubs-arm.cc | 203 ++- deps/v8/src/arm/code-stubs-arm.h | 11 +- deps/v8/src/arm/codegen-arm.cc | 10 +- deps/v8/src/arm/debug-arm.cc | 4 +- deps/v8/src/arm/deoptimizer-arm.cc | 200 +-- deps/v8/src/arm/disasm-arm.cc | 8 + deps/v8/src/arm/frames-arm.cc | 4 + deps/v8/src/arm/full-codegen-arm.cc | 129 +- deps/v8/src/arm/ic-arm.cc | 18 +- deps/v8/src/arm/lithium-arm.cc | 38 +- deps/v8/src/arm/lithium-arm.h | 42 +- deps/v8/src/arm/lithium-codegen-arm.cc | 228 +-- deps/v8/src/arm/lithium-codegen-arm.h | 18 +- deps/v8/src/arm/macro-assembler-arm.cc | 123 +- deps/v8/src/arm/macro-assembler-arm.h | 71 +- deps/v8/src/arm/simulator-arm.cc | 61 +- deps/v8/src/arm/stub-cache-arm.cc | 112 +- deps/v8/src/array.js | 23 + deps/v8/src/assembler.h | 10 +- deps/v8/src/ast.h | 19 +- deps/v8/src/bootstrapper.cc | 150 +- deps/v8/src/bootstrapper.h | 2 + deps/v8/src/builtins.cc | 39 +- deps/v8/src/builtins.h | 3 + deps/v8/src/checks.cc | 2 +- deps/v8/src/code-stubs-hydrogen.cc | 5 +- deps/v8/src/code-stubs.cc | 104 +- deps/v8/src/code-stubs.h | 154 +- deps/v8/src/compiler.cc | 31 +- deps/v8/src/compiler.h | 22 + deps/v8/src/contexts.h | 11 +- deps/v8/src/cpu-profiler.cc | 16 +- deps/v8/src/cpu-profiler.h | 10 +- deps/v8/src/d8.cc | 679 +------- deps/v8/src/deoptimizer.cc | 281 +++- deps/v8/src/deoptimizer.h | 92 +- deps/v8/src/disassembler.cc | 9 +- .../src/extensions/externalize-string-extension.cc | 4 +- deps/v8/src/extensions/statistics-extension.cc | 2 +- deps/v8/src/factory.h | 16 +- deps/v8/src/flag-definitions.h | 10 +- deps/v8/src/frames.h | 4 + deps/v8/src/handles.cc | 17 +- deps/v8/src/heap.cc | 130 +- deps/v8/src/heap.h | 45 +- deps/v8/src/hydrogen-instructions.cc | 104 +- deps/v8/src/hydrogen-instructions.h | 180 +- deps/v8/src/hydrogen.cc | 415 +++-- deps/v8/src/hydrogen.h | 18 +- deps/v8/src/ia32/builtins-ia32.cc | 5 + deps/v8/src/ia32/code-stubs-ia32.cc | 134 +- deps/v8/src/ia32/code-stubs-ia32.h | 12 +- deps/v8/src/ia32/deoptimizer-ia32.cc | 215 +-- deps/v8/src/ia32/frames-ia32.cc | 4 + deps/v8/src/ia32/full-codegen-ia32.cc | 94 +- deps/v8/src/ia32/lithium-codegen-ia32.cc | 222 +-- deps/v8/src/ia32/lithium-codegen-ia32.h | 18 +- deps/v8/src/ia32/lithium-ia32.cc | 43 +- deps/v8/src/ia32/lithium-ia32.h | 56 +- deps/v8/src/ia32/macro-assembler-ia32.cc | 49 +- deps/v8/src/ia32/macro-assembler-ia32.h | 9 +- deps/v8/src/ia32/stub-cache-ia32.cc | 70 +- deps/v8/src/ic.cc | 51 +- deps/v8/src/ic.h | 2 - deps/v8/src/isolate.cc | 47 +- deps/v8/src/isolate.h | 18 +- deps/v8/src/log.cc | 10 +- deps/v8/src/log.h | 3 + deps/v8/src/mark-compact.cc | 110 ++ deps/v8/src/mark-compact.h | 36 +- deps/v8/src/messages.js | 3 + deps/v8/src/mips/builtins-mips.cc | 5 + deps/v8/src/mips/code-stubs-mips.cc | 92 +- deps/v8/src/mips/code-stubs-mips.h | 11 +- deps/v8/src/mips/deoptimizer-mips.cc | 197 +-- deps/v8/src/mips/frames-mips.cc | 4 + deps/v8/src/mips/full-codegen-mips.cc | 107 +- deps/v8/src/mips/ic-mips.cc | 2 +- deps/v8/src/mips/lithium-codegen-mips.cc | 198 +-- 
deps/v8/src/mips/lithium-codegen-mips.h | 22 +- deps/v8/src/mips/lithium-mips.cc | 38 +- deps/v8/src/mips/lithium-mips.h | 42 +- deps/v8/src/mips/macro-assembler-mips.cc | 33 +- deps/v8/src/mips/macro-assembler-mips.h | 5 +- deps/v8/src/mips/simulator-mips.cc | 49 +- deps/v8/src/mips/stub-cache-mips.cc | 61 +- deps/v8/src/object-observe.js | 242 ++- deps/v8/src/objects-debug.cc | 11 + deps/v8/src/objects-inl.h | 76 +- deps/v8/src/objects-printer.cc | 4 +- deps/v8/src/objects-visiting-inl.h | 38 +- deps/v8/src/objects.cc | 446 +++-- deps/v8/src/objects.h | 61 +- deps/v8/src/parser.cc | 27 +- deps/v8/src/parser.h | 2 +- deps/v8/src/platform-posix.cc | 17 +- deps/v8/src/profile-generator-inl.h | 3 +- deps/v8/src/profile-generator.cc | 35 +- deps/v8/src/profile-generator.h | 11 +- deps/v8/src/property-details.h | 12 +- deps/v8/src/runtime-profiler.cc | 5 +- deps/v8/src/runtime.cc | 60 +- deps/v8/src/runtime.h | 1 - deps/v8/src/string-stream.h | 25 + deps/v8/src/string.js | 1 - deps/v8/src/stub-cache.cc | 56 +- deps/v8/src/stub-cache.h | 3 +- deps/v8/src/type-info.cc | 43 +- deps/v8/src/utils.h | 41 +- deps/v8/src/v8.cc | 1 + deps/v8/src/v8natives.js | 9 +- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/builtins-x64.cc | 5 + deps/v8/src/x64/code-stubs-x64.cc | 104 +- deps/v8/src/x64/code-stubs-x64.h | 11 +- deps/v8/src/x64/deoptimizer-x64.cc | 192 +-- deps/v8/src/x64/frames-x64.cc | 4 + deps/v8/src/x64/full-codegen-x64.cc | 95 +- deps/v8/src/x64/ic-x64.cc | 2 +- deps/v8/src/x64/lithium-codegen-x64.cc | 212 +-- deps/v8/src/x64/lithium-codegen-x64.h | 19 +- deps/v8/src/x64/lithium-x64.cc | 38 +- deps/v8/src/x64/lithium-x64.h | 44 +- deps/v8/src/x64/macro-assembler-x64.cc | 41 +- deps/v8/src/x64/macro-assembler-x64.h | 7 +- deps/v8/src/x64/stub-cache-x64.cc | 63 +- deps/v8/test/cctest/cctest.cc | 2 + deps/v8/test/cctest/cctest.gyp | 1 + deps/v8/test/cctest/cctest.status | 13 +- deps/v8/test/cctest/test-api.cc | 447 ++++- deps/v8/test/cctest/test-assembler-arm.cc | 9 + deps/v8/test/cctest/test-compare-nil-ic-stub.cc | 86 + deps/v8/test/cctest/test-conversions.cc | 19 + deps/v8/test/cctest/test-cpu-profiler.cc | 74 +- deps/v8/test/cctest/test-debug.cc | 10 +- deps/v8/test/cctest/test-deoptimization.cc | 4 +- deps/v8/test/cctest/test-disasm-arm.cc | 2 + deps/v8/test/cctest/test-heap-profiler.cc | 20 +- deps/v8/test/cctest/test-heap.cc | 7 + deps/v8/test/cctest/test-lockers.cc | 2 +- deps/v8/test/cctest/test-mark-compact.cc | 11 +- deps/v8/test/cctest/test-parsing.cc | 3 +- deps/v8/test/mjsunit/allocation-site-info.js | 4 - deps/v8/test/mjsunit/compiler/alloc-object.js | 4 +- deps/v8/test/mjsunit/compiler/dead-code.js | 79 + deps/v8/test/mjsunit/compiler/dead-code2.js | 84 + deps/v8/test/mjsunit/compiler/dead-code3.js | 78 + deps/v8/test/mjsunit/compiler/dead-code4.js | 78 + deps/v8/test/mjsunit/compiler/dead-code5.js | 89 + deps/v8/test/mjsunit/compiler/dead-code6.js | 73 + deps/v8/test/mjsunit/constant-folding-2.js | 9 + deps/v8/test/mjsunit/debug-script.js | 2 +- .../test/mjsunit/elements-transition-hoisting.js | 2 +- deps/v8/test/mjsunit/elide-double-hole-check-1.js | 52 + deps/v8/test/mjsunit/elide-double-hole-check-2.js | 41 + deps/v8/test/mjsunit/elide-double-hole-check-3.js | 39 + deps/v8/test/mjsunit/elide-double-hole-check-4.js | 39 + deps/v8/test/mjsunit/elide-double-hole-check-5.js | 40 + deps/v8/test/mjsunit/elide-double-hole-check-6.js | 39 + deps/v8/test/mjsunit/elide-double-hole-check-7.js | 40 + deps/v8/test/mjsunit/elide-double-hole-check-8.js | 40 + 
deps/v8/test/mjsunit/elide-double-hole-check-9.js | 49 + deps/v8/test/mjsunit/external-array-no-sse2.js | 9 +- deps/v8/test/mjsunit/external-array.js | 9 +- deps/v8/test/mjsunit/fast-element-smi-check.js | 6 +- deps/v8/test/mjsunit/function-prototype.js | 23 +- deps/v8/test/mjsunit/generated-transition-stub.js | 2 +- .../test/mjsunit/harmony/generators-iteration.js | 387 +++-- deps/v8/test/mjsunit/harmony/object-observe.js | 357 +++- deps/v8/test/mjsunit/mjsunit.status | 6 +- deps/v8/test/mjsunit/regress/regress-241344.js | 40 + deps/v8/test/mjsunit/regress/regress-2681.js | 48 + deps/v8/test/mjsunit/regress/regress-2686.js | 32 + .../test/mjsunit/regress/regress-crbug-233737.js | 42 + .../test/mjsunit/regress/regress-crbug-242502.js | 66 + .../test/mjsunit/regress/regress-crbug-242870.js | 43 + .../mjsunit/regress/regress-seqstrsetchar-ex1.js | 60 + .../mjsunit/regress/regress-seqstrsetchar-ex2.js | 35 + deps/v8/test/mjsunit/track-fields.js | 64 + deps/v8/test/mjsunit/unbox-double-arrays.js | 3 + deps/v8/test/test262/README | 4 +- deps/v8/test/test262/testcfg.py | 4 +- deps/v8/tools/gyp/v8.gyp | 1737 ++++++++++---------- 194 files changed, 8638 insertions(+), 5465 deletions(-) create mode 100644 deps/v8/src/arguments.cc create mode 100644 deps/v8/test/cctest/test-compare-nil-ic-stub.cc create mode 100644 deps/v8/test/mjsunit/compiler/dead-code.js create mode 100644 deps/v8/test/mjsunit/compiler/dead-code2.js create mode 100644 deps/v8/test/mjsunit/compiler/dead-code3.js create mode 100644 deps/v8/test/mjsunit/compiler/dead-code4.js create mode 100644 deps/v8/test/mjsunit/compiler/dead-code5.js create mode 100644 deps/v8/test/mjsunit/compiler/dead-code6.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-1.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-2.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-3.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-4.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-5.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-6.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-7.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-8.js create mode 100644 deps/v8/test/mjsunit/elide-double-hole-check-9.js create mode 100644 deps/v8/test/mjsunit/regress/regress-241344.js create mode 100644 deps/v8/test/mjsunit/regress/regress-2681.js create mode 100644 deps/v8/test/mjsunit/regress/regress-2686.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-233737.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-242502.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-242870.js create mode 100644 deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js create mode 100644 deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index d1dbb29ed..39885e783 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,30 @@ +2013-05-22: Version 3.19.3 + + Performance and stability improvements on all platforms. + + +2013-05-17: Version 3.19.2 + + Fill in one-word-fillers for the unused property fields + (Chromium issue 240056). + + Removed use_system_v8 logic from the mainline gyp file + (Chromium issue 226860). + + Skip CPU profiler samples where top function's stack frame is not + set up properly (issue 2628). + + Performance and stability improvements on all platforms. 
+ + +2013-05-14: Version 3.19.1 + + Fixed missing hole check for loads from Smi arrays when all uses are + changes (Chromium issue 233737). + + Performance and stability improvements on all platforms. + + 2013-05-10: Version 3.19.0 Deprecated Context::New which returns Persistent. diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 8028b3eec..2e3c7854d 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -29,7 +29,6 @@ { 'variables': { - 'use_system_v8%': 0, 'msvs_use_common_release': 0, 'gcc_version%': 'unknown', 'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command. @@ -454,6 +453,15 @@ }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ or OS=="android"', { + 'cflags!': [ + '-O2', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + '-O3', + ], 'conditions': [ [ 'gcc_version==44 and clang==0', { 'cflags': [ diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 3a86e86e0..b3dff3fee 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -144,6 +144,17 @@ class Value; template class Handle; template class Local; template class Persistent; +class FunctionTemplate; +class ObjectTemplate; +class Data; +class AccessorInfo; +template class PropertyCallbackInfo; +class StackTrace; +class StackFrame; +class Isolate; +class DeclaredAccessorDescriptor; +class ObjectOperationDescriptor; +class RawOperationDescriptor; namespace internal { class Arguments; @@ -151,6 +162,10 @@ class Heap; class HeapObject; class Isolate; class Object; +template +class CustomArguments; +class PropertyCallbackArguments; +class FunctionCallbackArguments; } @@ -695,6 +710,16 @@ template class Persistent // NOLINT */ V8_INLINE(void Reset(Isolate* isolate, const Handle& other)); + /** + * Returns the underlying raw pointer and clears the handle. The caller is + * responsible for eventually destroying the underlying object (by creating a + * Persistent handle which points to it and Disposing it). In the future, + * destructing a Persistent will also Dispose it. With this function, the + * embedder can let the Persistent go out of scope without it getting + * disposed. 
+ */ + V8_INLINE(T* ClearAndLeak()); + #ifndef V8_USE_UNSAFE_HANDLES #ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT @@ -1779,6 +1804,7 @@ class V8EXPORT String : public Primitive { */ class V8EXPORT AsciiValue { public: + // TODO(dcarney): deprecate explicit AsciiValue(Handle obj); ~AsciiValue(); char* operator*() { return str_; } @@ -1853,6 +1879,7 @@ class V8EXPORT Number : public Primitive { public: double Value() const; static Local New(double value); + static Local New(Isolate* isolate, double value); V8_INLINE(static Number* Cast(v8::Value* obj)); private: Number(); @@ -1925,11 +1952,18 @@ enum ExternalArrayType { */ typedef Handle (*AccessorGetter)(Local property, const AccessorInfo& info); +typedef void (*AccessorGetterCallback)( + Local property, + const PropertyCallbackInfo& info); typedef void (*AccessorSetter)(Local property, Local value, const AccessorInfo& info); +typedef void (*AccessorSetterCallback)( + Local property, + Local value, + const PropertyCallbackInfo& info); /** @@ -1999,12 +2033,19 @@ class V8EXPORT Object : public Value { bool Delete(uint32_t index); + // TODO(dcarney): deprecate bool SetAccessor(Handle name, AccessorGetter getter, AccessorSetter setter = 0, Handle data = Handle(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None); + bool SetAccessor(Handle name, + AccessorGetterCallback getter, + AccessorSetterCallback setter = 0, + Handle data = Handle(), + AccessControl settings = DEFAULT, + PropertyAttribute attribute = None); // This function is not yet stable and should not be used at this time. bool SetAccessor(Handle name, @@ -2693,13 +2734,36 @@ class V8EXPORT Template : public Data { }; +template +class V8EXPORT ReturnValue { + public: + V8_INLINE(explicit ReturnValue(internal::Object** slot)); + // Handle setters + V8_INLINE(void Set(const Persistent& handle)); + V8_INLINE(void Set(const Handle handle)); + // Fast primitive setters + V8_INLINE(void Set(Isolate* isolate, bool value)); + V8_INLINE(void Set(Isolate* isolate, double i)); + V8_INLINE(void Set(Isolate* isolate, int32_t i)); + V8_INLINE(void Set(Isolate* isolate, uint32_t i)); + // Fast JS primitive setters + V8_INLINE(void SetNull(Isolate* isolate)); + V8_INLINE(void SetUndefined(Isolate* isolate)); + private: + V8_INLINE(void SetTrue(Isolate* isolate)); + V8_INLINE(void SetFalse(Isolate* isolate)); + internal::Object** value_; +}; + + /** * The argument information given to function call callbacks. This * class provides access to information about the context of the call, * including the receiver, the number and values of arguments, and * the holder of the function. */ -class V8EXPORT Arguments { +template +class V8EXPORT FunctionCallbackInfo { public: V8_INLINE(int Length() const); V8_INLINE(Local operator[](int i) const); @@ -2709,15 +2773,20 @@ class V8EXPORT Arguments { V8_INLINE(bool IsConstructCall() const); V8_INLINE(Local Data() const); V8_INLINE(Isolate* GetIsolate() const); + V8_INLINE(ReturnValue GetReturnValue() const); + // This shouldn't be public, but the arm compiler needs it. 
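[As a hedged sketch of the migration these declarations enable (function names here are invented for illustration, not part of the patch): an old-style InvocationCallback returns its result, while a new-style FunctionCallback returns void and reports its result through GetReturnValue():

// Old style: the result travels via the C++ return value.
v8::Handle<v8::Value> AddOld(const v8::Arguments& args) {
  return v8::Number::New(args[0]->NumberValue() + args[1]->NumberValue());
}

// New style: void return; the result is written into the ReturnValue slot.
void AddNew(const v8::FunctionCallbackInfo<v8::Value>& info) {
  double sum = info[0]->NumberValue() + info[1]->NumberValue();
  info.GetReturnValue().Set(info.GetIsolate(), sum);
}

Either form can be handed to FunctionTemplate::New(); the CallbackTable machinery added later in this patch dispatches to whichever signature was registered.]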
+ static const int kArgsLength = 5; - private: - static const int kIsolateIndex = 0; - static const int kDataIndex = -1; - static const int kCalleeIndex = -2; - static const int kHolderIndex = -3; - - friend class ImplementationUtilities; - V8_INLINE(Arguments(internal::Object** implicit_args, + protected: + friend class internal::FunctionCallbackArguments; + friend class internal::CustomArguments; + static const int kReturnValueIndex = 0; + static const int kIsolateIndex = -1; + static const int kDataIndex = -2; + static const int kCalleeIndex = -3; + static const int kHolderIndex = -4; + + V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args, internal::Object** values, int length, bool is_construct_call)); @@ -2728,25 +2797,56 @@ class V8EXPORT Arguments { }; +class V8EXPORT Arguments : public FunctionCallbackInfo { + private: + friend class internal::FunctionCallbackArguments; + V8_INLINE(Arguments(internal::Object** implicit_args, + internal::Object** values, + int length, + bool is_construct_call)); +}; + /** - * The information passed to an accessor callback about the context + * The information passed to a property callback about the context * of the property access. */ -class V8EXPORT AccessorInfo { +template +class V8EXPORT PropertyCallbackInfo { public: - V8_INLINE(AccessorInfo(internal::Object** args)) - : args_(args) { } V8_INLINE(Isolate* GetIsolate() const); V8_INLINE(Local Data() const); V8_INLINE(Local This() const); V8_INLINE(Local Holder() const); + V8_INLINE(ReturnValue GetReturnValue() const); + // This shouldn't be public, but the arm compiler needs it. + static const int kArgsLength = 5; - private: + protected: + friend class MacroAssembler; + friend class internal::PropertyCallbackArguments; + friend class internal::CustomArguments; + static const int kThisIndex = 0; + static const int kHolderIndex = -1; + static const int kDataIndex = -2; + static const int kIsolateIndex = -3; + static const int kReturnValueIndex = -4; + + V8_INLINE(PropertyCallbackInfo(internal::Object** args)) + : args_(args) { } internal::Object** args_; }; +class V8EXPORT AccessorInfo : public PropertyCallbackInfo { + private: + friend class internal::PropertyCallbackArguments; + V8_INLINE(AccessorInfo(internal::Object** args)) + : PropertyCallbackInfo(args) { } +}; + + typedef Handle (*InvocationCallback)(const Arguments& args); +typedef void (*FunctionCallback)(const FunctionCallbackInfo& info); /** * NamedProperty[Getter|Setter] are used as interceptors on object. @@ -2754,6 +2854,9 @@ typedef Handle (*InvocationCallback)(const Arguments& args); */ typedef Handle (*NamedPropertyGetter)(Local property, const AccessorInfo& info); +typedef void (*NamedPropertyGetterCallback)( + Local property, + const PropertyCallbackInfo& info); /** @@ -2763,6 +2866,11 @@ typedef Handle (*NamedPropertyGetter)(Local property, typedef Handle (*NamedPropertySetter)(Local property, Local value, const AccessorInfo& info); +typedef void (*NamedPropertySetterCallback)( + Local property, + Local value, + const PropertyCallbackInfo& info); + /** * Returns a non-empty handle if the interceptor intercepts the request. 
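[For comparison, a minimal sketch (names invented) of the new accessor style declared above, next to its old-style equivalent:

// New style: void return; the value is reported through ReturnValue.
void XGetter(v8::Local<v8::String> name,
             const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(info.GetIsolate(), 42);  // int32_t overload
}

// Old style: the value is the C++ return value.
v8::Handle<v8::Value> XGetterOld(v8::Local<v8::String> name,
                                 const v8::AccessorInfo& info) {
  return v8::Integer::New(42);
}]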
@@ -2771,6 +2879,9 @@ typedef Handle (*NamedPropertySetter)(Local property, */ typedef Handle (*NamedPropertyQuery)(Local property, const AccessorInfo& info); +typedef void (*NamedPropertyQueryCallback)( + Local property, + const PropertyCallbackInfo& info); /** @@ -2780,12 +2891,18 @@ typedef Handle (*NamedPropertyQuery)(Local property, */ typedef Handle (*NamedPropertyDeleter)(Local property, const AccessorInfo& info); +typedef void (*NamedPropertyDeleterCallback)( + Local property, + const PropertyCallbackInfo& info); + /** * Returns an array containing the names of the properties the named * property getter intercepts. */ typedef Handle (*NamedPropertyEnumerator)(const AccessorInfo& info); +typedef void (*NamedPropertyEnumeratorCallback)( + const PropertyCallbackInfo& info); /** @@ -2794,6 +2911,9 @@ typedef Handle (*NamedPropertyEnumerator)(const AccessorInfo& info); */ typedef Handle (*IndexedPropertyGetter)(uint32_t index, const AccessorInfo& info); +typedef void (*IndexedPropertyGetterCallback)( + uint32_t index, + const PropertyCallbackInfo& info); /** @@ -2803,6 +2923,10 @@ typedef Handle (*IndexedPropertyGetter)(uint32_t index, typedef Handle (*IndexedPropertySetter)(uint32_t index, Local value, const AccessorInfo& info); +typedef void (*IndexedPropertySetterCallback)( + uint32_t index, + Local value, + const PropertyCallbackInfo& info); /** @@ -2811,6 +2935,10 @@ typedef Handle (*IndexedPropertySetter)(uint32_t index, */ typedef Handle (*IndexedPropertyQuery)(uint32_t index, const AccessorInfo& info); +typedef void (*IndexedPropertyQueryCallback)( + uint32_t index, + const PropertyCallbackInfo& info); + /** * Returns a non-empty handle if the deleter intercepts the request. @@ -2819,12 +2947,18 @@ typedef Handle (*IndexedPropertyQuery)(uint32_t index, */ typedef Handle (*IndexedPropertyDeleter)(uint32_t index, const AccessorInfo& info); +typedef void (*IndexedPropertyDeleterCallback)( + uint32_t index, + const PropertyCallbackInfo& info); + /** * Returns an array containing the indices of the properties the * indexed property getter intercepts. */ typedef Handle (*IndexedPropertyEnumerator)(const AccessorInfo& info); +typedef void (*IndexedPropertyEnumeratorCallback)( + const PropertyCallbackInfo& info); /** @@ -2954,11 +3088,18 @@ typedef bool (*IndexedSecurityCallback)(Local host, class V8EXPORT FunctionTemplate : public Template { public: /** Creates a function template.*/ + // TODO(dcarney): deprecate static Local New( InvocationCallback callback = 0, Handle data = Handle(), Handle signature = Handle(), int length = 0); + static Local New( + FunctionCallback callback, // TODO(dcarney): add back default param. + Handle data = Handle(), + Handle signature = Handle(), + int length = 0); + /** Returns the unique function instance in the current execution context.*/ Local GetFunction(); @@ -2967,8 +3108,11 @@ class V8EXPORT FunctionTemplate : public Template { * callback is called whenever the function created from this * FunctionTemplate is called. */ + // TODO(dcarney): deprecate void SetCallHandler(InvocationCallback callback, Handle data = Handle()); + void SetCallHandler(FunctionCallback callback, + Handle data = Handle()); /** Set the predefined length property for the FunctionTemplate. 
*/ void SetLength(int length); @@ -3020,21 +3164,6 @@ class V8EXPORT FunctionTemplate : public Template { private: FunctionTemplate(); - void SetNamedInstancePropertyHandler(NamedPropertyGetter getter, - NamedPropertySetter setter, - NamedPropertyQuery query, - NamedPropertyDeleter remover, - NamedPropertyEnumerator enumerator, - Handle data); - void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter, - IndexedPropertySetter setter, - IndexedPropertyQuery query, - IndexedPropertyDeleter remover, - IndexedPropertyEnumerator enumerator, - Handle data); - void SetInstanceCallAsFunctionHandler(InvocationCallback callback, - Handle data); - friend class Context; friend class ObjectTemplate; }; @@ -3083,6 +3212,7 @@ class V8EXPORT ObjectTemplate : public Template { * defined by FunctionTemplate::HasInstance()), an implicit TypeError is * thrown and no callback is invoked. */ + // TODO(dcarney): deprecate void SetAccessor(Handle name, AccessorGetter getter, AccessorSetter setter = 0, @@ -3091,6 +3221,14 @@ class V8EXPORT ObjectTemplate : public Template { PropertyAttribute attribute = None, Handle signature = Handle()); + void SetAccessor(Handle name, + AccessorGetterCallback getter, + AccessorSetterCallback setter = 0, + Handle data = Handle(), + AccessControl settings = DEFAULT, + PropertyAttribute attribute = None, + Handle signature = + Handle()); // This function is not yet stable and should not be used at this time. bool SetAccessor(Handle name, @@ -3117,12 +3255,20 @@ class V8EXPORT ObjectTemplate : public Template { * \param data A piece of data that will be passed to the callbacks * whenever they are invoked. */ + // TODO(dcarney): deprecate void SetNamedPropertyHandler(NamedPropertyGetter getter, NamedPropertySetter setter = 0, NamedPropertyQuery query = 0, NamedPropertyDeleter deleter = 0, NamedPropertyEnumerator enumerator = 0, Handle data = Handle()); + void SetNamedPropertyHandler( + NamedPropertyGetterCallback getter, + NamedPropertySetterCallback setter = 0, + NamedPropertyQueryCallback query = 0, + NamedPropertyDeleterCallback deleter = 0, + NamedPropertyEnumeratorCallback enumerator = 0, + Handle data = Handle()); /** * Sets an indexed property handler on the object template. @@ -3140,12 +3286,20 @@ class V8EXPORT ObjectTemplate : public Template { * \param data A piece of data that will be passed to the callbacks * whenever they are invoked. */ + // TODO(dcarney): deprecate void SetIndexedPropertyHandler(IndexedPropertyGetter getter, IndexedPropertySetter setter = 0, IndexedPropertyQuery query = 0, IndexedPropertyDeleter deleter = 0, IndexedPropertyEnumerator enumerator = 0, Handle data = Handle()); + void SetIndexedPropertyHandler( + IndexedPropertyGetterCallback getter, + IndexedPropertySetterCallback setter = 0, + IndexedPropertyQueryCallback query = 0, + IndexedPropertyDeleterCallback deleter = 0, + IndexedPropertyEnumeratorCallback enumerator = 0, + Handle data = Handle()); /** * Sets the callback to be used when calling instances created from @@ -3153,8 +3307,11 @@ class V8EXPORT ObjectTemplate : public Template { * behave like normal JavaScript objects that cannot be called as a * function. */ + // TODO(dcarney): deprecate void SetCallAsFunctionHandler(InvocationCallback callback, Handle data = Handle()); + void SetCallAsFunctionHandler(FunctionCallback callback, + Handle data = Handle()); /** * Mark object instances of the template as undetectable. 
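[Tying the new ObjectTemplate overloads together, a hedged usage sketch; NamedGetter and CallAsFunction are invented placeholders, and XGetter is the sketch accessor from earlier:

// New-style named-property interceptor: void return, PropertyCallbackInfo.
void NamedGetter(v8::Local<v8::String> name,
                 const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::String::New("intercepted"));
}

// New-style call-as-function handler.
void CallAsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().SetUndefined(info.GetIsolate());
}

v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
templ->SetAccessor(v8::String::New("x"), XGetter);        // new-style overload
templ->SetNamedPropertyHandler(NamedGetter);              // new-style overload
templ->SetCallAsFunctionHandler(CallAsFunction);          // new-style overload]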
@@ -3830,25 +3987,29 @@ struct JitCodeEvent { // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events. void* user_data; + struct name_t { + // Name of the object associated with the code, note that the string is not + // zero-terminated. + const char* str; + // Number of chars in str. + size_t len; + }; + + struct line_info_t { + // PC offset + size_t offset; + // Code position + size_t pos; + // The position type. + PositionType position_type; + }; + union { // Only valid for CODE_ADDED. - struct { - // Name of the object associated with the code, note that the string is - // not zero-terminated. - const char* str; - // Number of chars in str. - size_t len; - } name; + struct name_t name; // Only valid for CODE_ADD_LINE_POS_INFO - struct { - // PC offset - size_t offset; - // Code postion - size_t pos; - // The position type. - PositionType position_type; - } line_info; + struct line_info_t line_info; // New location of instructions. Only valid for CODE_MOVED. void* new_code_start; @@ -4609,11 +4770,10 @@ class V8EXPORT Context { Handle global_object = Handle()); /** Deprecated. Use Isolate version instead. */ - // TODO(mstarzinger): Put this behind the V8_DEPRECATED guard. - static Persistent New( + V8_DEPRECATED(static Persistent New( ExtensionConfiguration* extensions = NULL, Handle global_template = Handle(), - Handle global_object = Handle()); + Handle global_object = Handle())); /** Returns the last entered context. */ static Local GetEntered(); @@ -4982,6 +5142,14 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; template struct SmiTagging; +template +V8_INLINE(internal::Object* IntToSmi(int value)) { + int smi_shift_bits = kSmiTagSize + kSmiShiftSize; + intptr_t tagged_value = + (static_cast(value) << smi_shift_bits) | kSmiTag; + return reinterpret_cast(tagged_value); +} + // Smi constants for 32-bit systems. template <> struct SmiTagging<4> { static const int kSmiShiftSize = 0; @@ -4991,6 +5159,23 @@ template <> struct SmiTagging<4> { // Throw away top 32 bits and shift down (requires >> to be sign extending). return static_cast(reinterpret_cast(value)) >> shift_bits; } + V8_INLINE(static internal::Object* IntToSmi(int value)) { + return internal::IntToSmi(value); + } + V8_INLINE(static bool IsValidSmi(intptr_t value)) { + // To be representable as a tagged small integer, the two + // most-significant bits of 'value' must be either 00 or 11 due to + // sign-extension. To check this we add 01 to the two + // most-significant bits, and check if the most-significant bit is 0 + // + // CAUTION: The original code below: + // bool result = ((value + 0x40000000) & 0x80000000) == 0; + // may lead to incorrect results according to the C language spec, and + // in fact doesn't work correctly with gcc4.1.1 in some cases: The + // compiler may produce undefined results in case of signed integer + // overflow. The computation must be done w/ unsigned ints. + return static_cast(value + 0x40000000U) < 0x80000000U; + } }; // Smi constants for 64-bit systems. @@ -5002,6 +5187,13 @@ template <> struct SmiTagging<8> { // Shift down and throw away top 32 bits. return static_cast(reinterpret_cast(value) >> shift_bits); } + V8_INLINE(static internal::Object* IntToSmi(int value)) { + return internal::IntToSmi(value); + } + V8_INLINE(static bool IsValidSmi(intptr_t value)) { + // To be representable as a long smi, the value must be a 32-bit integer. 
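[To make the overflow-safe 32-bit check above concrete, worked numbers (illustration only): a 32-bit smi stores a 31-bit payload, so valid values lie in [-2^30, 2^30 - 1].

// 0x3FFFFFFF (=  2^30 - 1): 0x3FFFFFFF + 0x40000000 = 0x7FFFFFFF < 0x80000000 -> valid smi
// 0x40000000 (=  2^30):     0x40000000 + 0x40000000 = 0x80000000, not less    -> not a smi
// 0xC0000000 (= -2^30):     0xC0000000 + 0x40000000 = 0x00000000 (mod 2^32)   -> valid smi]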
+ return (value == static_cast(value)); + } }; typedef SmiTagging PlatformSmiTagging; @@ -5026,7 +5218,7 @@ class Internals { static const int kJSObjectHeaderSize = 3 * kApiPointerSize; static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; static const int kContextHeaderSize = 2 * kApiPointerSize; - static const int kContextEmbedderDataIndex = 65; + static const int kContextEmbedderDataIndex = 64; static const int kFullStringRepresentationMask = 0x07; static const int kStringEncodingMask = 0x4; static const int kExternalTwoByteRepresentationTag = 0x02; @@ -5039,7 +5231,7 @@ class Internals { static const int kNullValueRootIndex = 7; static const int kTrueValueRootIndex = 8; static const int kFalseValueRootIndex = 9; - static const int kEmptyStringRootIndex = 118; + static const int kEmptyStringRootIndex = 127; static const int kNodeClassIdOffset = 1 * kApiPointerSize; static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3; @@ -5066,6 +5258,14 @@ class Internals { return PlatformSmiTagging::SmiToInt(value); } + V8_INLINE(static internal::Object* IntToSmi(int value)) { + return PlatformSmiTagging::IntToSmi(value); + } + + V8_INLINE(static bool IsValidSmi(intptr_t value)) { + return PlatformSmiTagging::IsValidSmi(value); + } + V8_INLINE(static int GetInstanceType(internal::Object* obj)) { typedef internal::Object O; O* map = ReadField(obj, kHeapObjectMapOffset); @@ -5384,6 +5584,7 @@ void Persistent::SetWrapperClassId(uint16_t class_id) { SetWrapperClassId(Isolate::GetCurrent(), class_id); } + template void Persistent::Reset(Isolate* isolate, const Handle& other) { Dispose(isolate); @@ -5400,6 +5601,21 @@ void Persistent::Reset(Isolate* isolate, const Handle& other) { #endif } + +template +T* Persistent::ClearAndLeak() { + T* old; +#ifdef V8_USE_UNSAFE_HANDLES + old = **this; + *this = Persistent(); +#else + old = val_; + val_ = NULL; +#endif + return old; +} + + template void Persistent::SetWrapperClassId(Isolate* isolate, uint16_t class_id) { typedef internal::Internals I; @@ -5425,54 +5641,150 @@ uint16_t Persistent::WrapperClassId(Isolate* isolate) const { return *reinterpret_cast(addr); } -Arguments::Arguments(internal::Object** implicit_args, - internal::Object** values, int length, - bool is_construct_call) + +template +ReturnValue::ReturnValue(internal::Object** slot) : value_(slot) {} + +template +void ReturnValue::Set(const Persistent& handle) { + *value_ = *reinterpret_cast(*handle); +} + +template +void ReturnValue::Set(const Handle handle) { + *value_ = *reinterpret_cast(*handle); +} + +template +void ReturnValue::Set(Isolate* isolate, double i) { + Set(Number::New(isolate, i)); +} + +template +void ReturnValue::Set(Isolate* isolate, int32_t i) { + typedef internal::Internals I; + if (V8_LIKELY(I::IsValidSmi(i))) { + *value_ = I::IntToSmi(i); + return; + } + Set(Integer::New(i, isolate)); +} + +template +void ReturnValue::Set(Isolate* isolate, uint32_t i) { + typedef internal::Internals I; + if (V8_LIKELY(I::IsValidSmi(i))) { + *value_ = I::IntToSmi(i); + return; + } + Set(Integer::NewFromUnsigned(i, isolate)); +} + +template +void ReturnValue::Set(Isolate* isolate, bool value) { + if (value) { + SetTrue(isolate); + } else { + SetFalse(isolate); + } +} + +template +void ReturnValue::SetTrue(Isolate* isolate) { + typedef internal::Internals I; + *value_ = *I::GetRoot(isolate, I::kTrueValueRootIndex); +} + +template +void ReturnValue::SetFalse(Isolate* isolate) { + typedef internal::Internals I; + *value_ = *I::GetRoot(isolate, I::kFalseValueRootIndex); +} + +template 
+void ReturnValue::SetNull(Isolate* isolate) { + typedef internal::Internals I; + *value_ = *I::GetRoot(isolate, I::kNullValueRootIndex); +} + +template +void ReturnValue::SetUndefined(Isolate* isolate) { + typedef internal::Internals I; + *value_ = *I::GetRoot(isolate, I::kUndefinedValueRootIndex); +} + + +template +FunctionCallbackInfo::FunctionCallbackInfo(internal::Object** implicit_args, + internal::Object** values, + int length, + bool is_construct_call) : implicit_args_(implicit_args), values_(values), length_(length), is_construct_call_(is_construct_call) { } -Local Arguments::operator[](int i) const { +Arguments::Arguments(internal::Object** args, + internal::Object** values, + int length, + bool is_construct_call) + : FunctionCallbackInfo(args, values, length, is_construct_call) { } + + +template +Local FunctionCallbackInfo::operator[](int i) const { if (i < 0 || length_ <= i) return Local(*Undefined()); return Local(reinterpret_cast(values_ - i)); } -Local Arguments::Callee() const { +template +Local FunctionCallbackInfo::Callee() const { return Local(reinterpret_cast( &implicit_args_[kCalleeIndex])); } -Local Arguments::This() const { +template +Local FunctionCallbackInfo::This() const { return Local(reinterpret_cast(values_ + 1)); } -Local Arguments::Holder() const { +template +Local FunctionCallbackInfo::Holder() const { return Local(reinterpret_cast( &implicit_args_[kHolderIndex])); } -Local Arguments::Data() const { +template +Local FunctionCallbackInfo::Data() const { return Local(reinterpret_cast(&implicit_args_[kDataIndex])); } -Isolate* Arguments::GetIsolate() const { +template +Isolate* FunctionCallbackInfo::GetIsolate() const { return *reinterpret_cast(&implicit_args_[kIsolateIndex]); } -bool Arguments::IsConstructCall() const { +template +ReturnValue FunctionCallbackInfo::GetReturnValue() const { + return ReturnValue(&implicit_args_[kReturnValueIndex]); +} + + +template +bool FunctionCallbackInfo::IsConstructCall() const { return is_construct_call_; } -int Arguments::Length() const { +template +int FunctionCallbackInfo::Length() const { return length_; } @@ -5861,23 +6173,33 @@ External* External::Cast(v8::Value* value) { } -Isolate* AccessorInfo::GetIsolate() const { - return *reinterpret_cast(&args_[-3]); +template +Isolate* PropertyCallbackInfo::GetIsolate() const { + return *reinterpret_cast(&args_[kIsolateIndex]); +} + + +template +Local PropertyCallbackInfo::Data() const { + return Local(reinterpret_cast(&args_[kDataIndex])); } -Local AccessorInfo::Data() const { - return Local(reinterpret_cast(&args_[-2])); +template +Local PropertyCallbackInfo::This() const { + return Local(reinterpret_cast(&args_[kThisIndex])); } -Local AccessorInfo::This() const { - return Local(reinterpret_cast(&args_[0])); +template +Local PropertyCallbackInfo::Holder() const { + return Local(reinterpret_cast(&args_[kHolderIndex])); } -Local AccessorInfo::Holder() const { - return Local(reinterpret_cast(&args_[-1])); +template +ReturnValue PropertyCallbackInfo::GetReturnValue() const { + return ReturnValue(&args_[kReturnValueIndex]); } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 8a6eaf476..7099ca8dd 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -983,8 +983,12 @@ void FunctionTemplate::Inherit(v8::Handle value) { } -Local FunctionTemplate::New(InvocationCallback callback, - v8::Handle data, v8::Handle signature, int length) { +template +static Local FunctionTemplateNew( + Callback callback_in, + v8::Handle data, + v8::Handle signature, + int length) { 
i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()"); LOG_API(isolate, "FunctionTemplate::New"); @@ -997,8 +1001,10 @@ Local FunctionTemplate::New(InvocationCallback callback, int next_serial_number = isolate->next_serial_number(); isolate->set_next_serial_number(next_serial_number + 1); obj->set_serial_number(i::Smi::FromInt(next_serial_number)); - if (callback != 0) { + if (callback_in != 0) { if (data.IsEmpty()) data = v8::Undefined(); + InvocationCallback callback = + i::CallbackTable::Register(isolate, callback_in); Utils::ToLocal(obj)->SetCallHandler(callback, data); } obj->set_length(length); @@ -1011,6 +1017,24 @@ Local FunctionTemplate::New(InvocationCallback callback, } +Local FunctionTemplate::New( + InvocationCallback callback, + v8::Handle data, + v8::Handle signature, + int length) { + return FunctionTemplateNew(callback, data, signature, length); +} + + +Local FunctionTemplate::New( + FunctionCallback callback, + v8::Handle data, + v8::Handle signature, + int length) { + return FunctionTemplateNew(callback, data, signature, length); +} + + Local Signature::New(Handle receiver, int argc, Handle argv[]) { i::Isolate* isolate = i::Isolate::Current(); @@ -1202,9 +1226,11 @@ int TypeSwitch::match(v8::Handle value) { } while (false) -void FunctionTemplate::SetCallHandler(InvocationCallback callback, - v8::Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +template +static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template, + Callback callback, + v8::Handle data) { + i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return; ENTER_V8(isolate); i::HandleScope scope(isolate); @@ -1215,9 +1241,18 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback, SET_FIELD_WRAPPED(obj, set_callback, callback); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); - Utils::OpenHandle(this)->set_call_code(*obj); + Utils::OpenHandle(function_template)->set_call_code(*obj); +} + +void FunctionTemplate::SetCallHandler(InvocationCallback callback, + v8::Handle data) { + FunctionTemplateSetCallHandler(this, callback, data); } +void FunctionTemplate::SetCallHandler(FunctionCallback callback, + v8::Handle data) { + FunctionTemplateSetCallHandler(this, callback, data); +} static i::Handle SetAccessorInfoProperties( i::Handle obj, @@ -1237,10 +1272,11 @@ static i::Handle SetAccessorInfoProperties( } +template static i::Handle MakeAccessorInfo( v8::Handle name, - AccessorGetter getter, - AccessorSetter setter, + Getter getter_in, + Setter setter_in, v8::Handle data, v8::AccessControl settings, v8::PropertyAttribute attributes, @@ -1248,7 +1284,9 @@ static i::Handle MakeAccessorInfo( i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate(); i::Handle obj = isolate->factory()->NewExecutableAccessorInfo(); + AccessorGetter getter = i::CallbackTable::Register(isolate, getter_in); SET_FIELD_WRAPPED(obj, set_getter, getter); + AccessorSetter setter = i::CallbackTable::Register(isolate, setter_in); SET_FIELD_WRAPPED(obj, set_setter, setter); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); @@ -1259,6 +1297,8 @@ static i::Handle MakeAccessorInfo( static i::Handle MakeAccessorInfo( v8::Handle name, v8::Handle descriptor, + void* setter_ignored, + void* data_ignored, v8::AccessControl settings, v8::PropertyAttribute attributes, 
v8::Handle signature) { @@ -1323,15 +1363,21 @@ void FunctionTemplate::ReadOnlyPrototype() { Utils::OpenHandle(this)->set_read_only_prototype(true); } - -void FunctionTemplate::SetNamedInstancePropertyHandler( - NamedPropertyGetter getter, - NamedPropertySetter setter, - NamedPropertyQuery query, - NamedPropertyDeleter remover, - NamedPropertyEnumerator enumerator, +template< + typename Getter, + typename Setter, + typename Query, + typename Deleter, + typename Enumerator> +static void SetNamedInstancePropertyHandler( + i::Handle function_template, + Getter getter_in, + Setter setter_in, + Query query_in, + Deleter remover_in, + Enumerator enumerator_in, Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + i::Isolate* isolate = function_template->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) { return; @@ -1343,26 +1389,40 @@ void FunctionTemplate::SetNamedInstancePropertyHandler( i::Handle obj = i::Handle::cast(struct_obj); + NamedPropertyGetter getter = i::CallbackTable::Register(isolate, getter_in); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter); + NamedPropertySetter setter = i::CallbackTable::Register(isolate, setter_in); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter); + NamedPropertyQuery query = i::CallbackTable::Register(isolate, query_in); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query); + NamedPropertyDeleter remover = + i::CallbackTable::Register(isolate, remover_in); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); + NamedPropertyEnumerator enumerator = + i::CallbackTable::Register(isolate, enumerator_in); if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); - Utils::OpenHandle(this)->set_named_property_handler(*obj); -} - - -void FunctionTemplate::SetIndexedInstancePropertyHandler( - IndexedPropertyGetter getter, - IndexedPropertySetter setter, - IndexedPropertyQuery query, - IndexedPropertyDeleter remover, - IndexedPropertyEnumerator enumerator, + function_template->set_named_property_handler(*obj); +} + + +template< + typename Getter, + typename Setter, + typename Query, + typename Deleter, + typename Enumerator> +static void SetIndexedInstancePropertyHandler( + i::Handle function_template, + Getter getter_in, + Setter setter_in, + Query query_in, + Deleter remover_in, + Enumerator enumerator_in, Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + i::Isolate* isolate = function_template->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) { return; @@ -1374,22 +1434,33 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler( i::Handle obj = i::Handle::cast(struct_obj); + IndexedPropertyGetter getter = + i::CallbackTable::Register(isolate, getter_in); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter); + IndexedPropertySetter setter = + i::CallbackTable::Register(isolate, setter_in); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter); + IndexedPropertyQuery query = i::CallbackTable::Register(isolate, query_in); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query); + IndexedPropertyDeleter remover = + i::CallbackTable::Register(isolate, remover_in); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); + IndexedPropertyEnumerator enumerator = + i::CallbackTable::Register(isolate, enumerator_in); if (enumerator != 0) 
SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); - Utils::OpenHandle(this)->set_indexed_property_handler(*obj); + function_template->set_indexed_property_handler(*obj); } -void FunctionTemplate::SetInstanceCallAsFunctionHandler( - InvocationCallback callback, +template +static void SetInstanceCallAsFunctionHandler( + i::Handle function_template, + Callback callback_in, Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + i::Isolate* isolate = function_template->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) { return; @@ -1400,10 +1471,12 @@ void FunctionTemplate::SetInstanceCallAsFunctionHandler( isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE); i::Handle obj = i::Handle::cast(struct_obj); + InvocationCallback callback = + i::CallbackTable::Register(isolate, callback_in); SET_FIELD_WRAPPED(obj, set_callback, callback); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); - Utils::OpenHandle(this)->set_instance_call_handler(*obj); + function_template->set_instance_call_handler(*obj); } @@ -1461,6 +1534,32 @@ static inline void AddPropertyToFunctionTemplate( } +template +static bool ObjectTemplateSetAccessor( + ObjectTemplate* object_template, + v8::Handle name, + Getter getter, + Setter setter, + Data data, + AccessControl settings, + PropertyAttribute attribute, + v8::Handle signature) { + i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate(); + if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false; + ENTER_V8(isolate); + i::HandleScope scope(isolate); + EnsureConstructor(object_template); + i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast( + Utils::OpenHandle(object_template)->constructor()); + i::Handle cons(constructor); + i::Handle obj = MakeAccessorInfo( + name, getter, setter, data, settings, attribute, signature); + if (obj.is_null()) return false; + AddPropertyToFunctionTemplate(cons, obj); + return true; +} + + void ObjectTemplate::SetAccessor(v8::Handle name, AccessorGetter getter, AccessorSetter setter, @@ -1468,64 +1567,89 @@ void ObjectTemplate::SetAccessor(v8::Handle name, AccessControl settings, PropertyAttribute attribute, v8::Handle signature) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return; - ENTER_V8(isolate); - i::HandleScope scope(isolate); - EnsureConstructor(this); - i::FunctionTemplateInfo* constructor = - i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); - i::Handle cons(constructor); - i::Handle obj = MakeAccessorInfo(name, getter, setter, data, - settings, attribute, - signature); - AddPropertyToFunctionTemplate(cons, obj); + ObjectTemplateSetAccessor( + this, name, getter, setter, data, settings, attribute, signature); } -bool ObjectTemplate::SetAccessor(Handle name, - Handle descriptor, +void ObjectTemplate::SetAccessor(v8::Handle name, + AccessorGetterCallback getter, + AccessorSetterCallback setter, + v8::Handle data, AccessControl settings, PropertyAttribute attribute, - Handle signature) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false; - ENTER_V8(isolate); - i::HandleScope scope(isolate); - EnsureConstructor(this); - i::FunctionTemplateInfo* constructor = - 
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); - i::Handle cons(constructor); - i::Handle obj = MakeAccessorInfo( - name, descriptor, settings, attribute, signature); - if (obj.is_null()) return false; - AddPropertyToFunctionTemplate(cons, obj); - return true; + v8::Handle signature) { + ObjectTemplateSetAccessor( + this, name, getter, setter, data, settings, attribute, signature); } -void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter, - NamedPropertySetter setter, - NamedPropertyQuery query, - NamedPropertyDeleter remover, - NamedPropertyEnumerator enumerator, - Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +bool ObjectTemplate::SetAccessor(Handle name, + Handle descriptor, + AccessControl settings, + PropertyAttribute attribute, + Handle signature) { + void* null = NULL; + return ObjectTemplateSetAccessor( + this, name, descriptor, null, null, settings, attribute, signature); +} + + +template< + typename Getter, + typename Setter, + typename Query, + typename Deleter, + typename Enumerator> +static void ObjectTemplateSetNamedPropertyHandler( + ObjectTemplate* object_template, + Getter getter, + Setter setter, + Query query, + Deleter remover, + Enumerator enumerator, + Handle data) { + i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate(); if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) { return; } ENTER_V8(isolate); i::HandleScope scope(isolate); - EnsureConstructor(this); - i::FunctionTemplateInfo* constructor = - i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); + EnsureConstructor(object_template); + i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast( + Utils::OpenHandle(object_template)->constructor()); i::Handle cons(constructor); - Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter, - setter, - query, - remover, - enumerator, - data); + SetNamedInstancePropertyHandler(cons, + getter, + setter, + query, + remover, + enumerator, + data); +} + + +void ObjectTemplate::SetNamedPropertyHandler( + NamedPropertyGetter getter, + NamedPropertySetter setter, + NamedPropertyQuery query, + NamedPropertyDeleter remover, + NamedPropertyEnumerator enumerator, + Handle data) { + ObjectTemplateSetNamedPropertyHandler( + this, getter, setter, query, remover, enumerator, data); +} + + +void ObjectTemplate::SetNamedPropertyHandler( + NamedPropertyGetterCallback getter, + NamedPropertySetterCallback setter, + NamedPropertyQueryCallback query, + NamedPropertyDeleterCallback remover, + NamedPropertyEnumeratorCallback enumerator, + Handle data) { + ObjectTemplateSetNamedPropertyHandler( + this, getter, setter, query, remover, enumerator, data); } @@ -1574,46 +1698,93 @@ void ObjectTemplate::SetAccessCheckCallbacks( } -void ObjectTemplate::SetIndexedPropertyHandler( - IndexedPropertyGetter getter, - IndexedPropertySetter setter, - IndexedPropertyQuery query, - IndexedPropertyDeleter remover, - IndexedPropertyEnumerator enumerator, +template< + typename Getter, + typename Setter, + typename Query, + typename Deleter, + typename Enumerator> +void ObjectTemplateSetIndexedPropertyHandler( + ObjectTemplate* object_template, + Getter getter, + Setter setter, + Query query, + Deleter remover, + Enumerator enumerator, Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate(); if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) { 
return; } ENTER_V8(isolate); i::HandleScope scope(isolate); - EnsureConstructor(this); - i::FunctionTemplateInfo* constructor = - i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); + EnsureConstructor(object_template); + i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast( + Utils::OpenHandle(object_template)->constructor()); i::Handle cons(constructor); - Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter, - setter, - query, - remover, - enumerator, - data); + SetIndexedInstancePropertyHandler(cons, + getter, + setter, + query, + remover, + enumerator, + data); } -void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback, - Handle data) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +void ObjectTemplate::SetIndexedPropertyHandler( + IndexedPropertyGetter getter, + IndexedPropertySetter setter, + IndexedPropertyQuery query, + IndexedPropertyDeleter remover, + IndexedPropertyEnumerator enumerator, + Handle data) { + ObjectTemplateSetIndexedPropertyHandler( + this, getter, setter, query, remover, enumerator, data); +} + + +void ObjectTemplate::SetIndexedPropertyHandler( + IndexedPropertyGetterCallback getter, + IndexedPropertySetterCallback setter, + IndexedPropertyQueryCallback query, + IndexedPropertyDeleterCallback remover, + IndexedPropertyEnumeratorCallback enumerator, + Handle data) { + ObjectTemplateSetIndexedPropertyHandler( + this, getter, setter, query, remover, enumerator, data); +} + + +template +static void ObjectTemplateSetCallAsFunctionHandler( + ObjectTemplate* object_template, + Callback callback, + Handle data) { + i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate(); if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetCallAsFunctionHandler()")) { return; } ENTER_V8(isolate); i::HandleScope scope(isolate); - EnsureConstructor(this); - i::FunctionTemplateInfo* constructor = - i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); + EnsureConstructor(object_template); + i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast( + Utils::OpenHandle(object_template)->constructor()); i::Handle cons(constructor); - Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data); + SetInstanceCallAsFunctionHandler(cons, callback, data); +} + + +void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback, + Handle data) { + return ObjectTemplateSetCallAsFunctionHandler(this, callback, data); +} + + +void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback, + Handle data) { + return ObjectTemplateSetCallAsFunctionHandler(this, callback, data); } @@ -3446,7 +3617,21 @@ bool v8::Object::Has(uint32_t index) { } -static inline bool SetAccessor(Object* obj, i::Handle info) { +template +static inline bool ObjectSetAccessor(Object* obj, + Handle name, + Setter getter, + Getter setter, + Data data, + AccessControl settings, + PropertyAttribute attributes) { + i::Isolate* isolate = Utils::OpenHandle(obj)->GetIsolate(); + ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false); + ENTER_V8(isolate); + i::HandleScope scope(isolate); + v8::Handle signature; + i::Handle info = MakeAccessorInfo( + name, getter, setter, data, settings, attributes, signature); if (info.is_null()) return false; bool fast = Utils::OpenHandle(obj)->HasFastProperties(); i::Handle result = i::SetAccessor(Utils::OpenHandle(obj), info); @@ -3462,15 +3647,19 @@ bool Object::SetAccessor(Handle name, v8::Handle data, AccessControl settings, PropertyAttribute 
attributes) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false); - ENTER_V8(isolate); - i::HandleScope scope(isolate); - v8::Handle signature; - i::Handle info = MakeAccessorInfo(name, getter, setter, data, - settings, attributes, - signature); - return v8::SetAccessor(this, info); + return ObjectSetAccessor( + this, name, getter, setter, data, settings, attributes); +} + + +bool Object::SetAccessor(Handle name, + AccessorGetterCallback getter, + AccessorSetterCallback setter, + v8::Handle data, + AccessControl settings, + PropertyAttribute attributes) { + return ObjectSetAccessor( + this, name, getter, setter, data, settings, attributes); } @@ -3478,14 +3667,9 @@ bool Object::SetAccessor(Handle name, Handle descriptor, AccessControl settings, PropertyAttribute attributes) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false); - ENTER_V8(isolate); - i::HandleScope scope(isolate); - v8::Handle signature; - i::Handle info = MakeAccessorInfo( - name, descriptor, settings, attributes, signature); - return v8::SetAccessor(this, info); + void* null = NULL; + return ObjectSetAccessor( + this, name, descriptor, null, null, settings, attributes); } @@ -5953,10 +6137,6 @@ i::Handle NewTypedArray( isolate->factory()->NewExternalArray( static_cast(length), array_type, static_cast(buffer->backing_store()) + byte_offset); - i::Handle map = - isolate->factory()->GetElementsTransitionMap( - obj, elements_kind); - obj->set_map(*map); obj->set_elements(*elements); return obj; } @@ -6027,12 +6207,19 @@ Local v8::Symbol::New(Isolate* isolate, const char* data, int length) { Local v8::Number::New(double value) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Number::New()"); + return Number::New(reinterpret_cast(isolate), value); +} + + +Local v8::Number::New(Isolate* isolate, double value) { + i::Isolate* internal_isolate = reinterpret_cast(isolate); + ASSERT(internal_isolate->IsInitialized()); if (std::isnan(value)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. value = i::OS::nan_value(); } - ENTER_V8(isolate); - i::Handle result = isolate->factory()->NewNumber(value); + ENTER_V8(internal_isolate); + i::Handle result = internal_isolate->factory()->NewNumber(value); return Utils::NumberToLocal(result); } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 686abf75c..12d6e3d08 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -149,12 +149,10 @@ class RegisteredExtension { static void UnregisterAll(); Extension* extension() { return extension_; } RegisteredExtension* next() { return next_; } - RegisteredExtension* next_auto() { return next_auto_; } static RegisteredExtension* first_extension() { return first_extension_; } private: Extension* extension_; RegisteredExtension* next_; - RegisteredExtension* next_auto_; static RegisteredExtension* first_extension_; }; diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h index 9831f0866..076558564 100644 --- a/deps/v8/src/apiutils.h +++ b/deps/v8/src/apiutils.h @@ -39,31 +39,6 @@ class ImplementationUtilities { return that->names_; } - // Packs additional parameters for the NewArguments function. |implicit_args| - // is a pointer to the last element of 4-elements array controlled by GC. 
- static void PrepareArgumentsData(internal::Object** implicit_args, - internal::Isolate* isolate, - internal::Object* data, - internal::JSFunction* callee, - internal::Object* holder) { - implicit_args[v8::Arguments::kDataIndex] = data; - implicit_args[v8::Arguments::kCalleeIndex] = callee; - implicit_args[v8::Arguments::kHolderIndex] = holder; - implicit_args[v8::Arguments::kIsolateIndex] = - reinterpret_cast(isolate); - } - - static v8::Arguments NewArguments(internal::Object** implicit_args, - internal::Object** argv, int argc, - bool is_construct_call) { - ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction()); - ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject()); - // The implicit isolate argument is not tagged and looks like a SMI. - ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi()); - - return v8::Arguments(implicit_args, argv, argc, is_construct_call); - } - // Introduce an alias for the handle scope data to allow non-friends // to access the HandleScope data. typedef v8::HandleScope::Data HandleScopeData; diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc new file mode 100644 index 000000000..091d0b92a --- /dev/null +++ b/deps/v8/src/arguments.cc @@ -0,0 +1,195 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
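(Aside, not part of the upstream patch: this new file implements the bridge between the two callback styles that v8 3.19 supports side by side. Assuming the public API of this V8 version, an embedder-side migration of one function callback looks roughly like the sketch below; the old form keeps working through the CallbackTable shim defined further down.)

    // Old style, still dispatched through the CallbackTable shim:
    static v8::Handle<v8::Value> Hello(const v8::Arguments& args) {
      return v8::String::New("hello");
    }

    // New style: void return, result reported through the callback info.
    static void Hello(const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::String::New("hello"));
    }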
+ +#include "v8.h" +#include "arguments.h" + +namespace v8 { +namespace internal { + + +static bool Match(void* a, void* b) { + return a == b; +} + + +static uint32_t Hash(void* function) { + uintptr_t as_int = reinterpret_cast(function); + if (sizeof(function) == 4) return static_cast(as_int); + uint64_t as_64 = static_cast(as_int); + return + static_cast(as_64 >> 32) ^ + static_cast(as_64); +} + + +CallbackTable::CallbackTable(): map_(Match, 64) {} + + +bool CallbackTable::Contains(void* function) { + ASSERT(function != NULL); + return map_.Lookup(function, Hash(function), false) != NULL; +} + + +void CallbackTable::InsertCallback(Isolate* isolate, + void* function, + bool returns_void) { + if (function == NULL) return; + // Don't store for performance. + if (kStoreVoidFunctions != returns_void) return; + CallbackTable* table = isolate->callback_table(); + if (table == NULL) { + table = new CallbackTable(); + isolate->set_callback_table(table); + } + typedef HashMap::Entry Entry; + Entry* entry = table->map_.Lookup(function, Hash(function), true); + ASSERT(entry != NULL); + ASSERT(entry->value == NULL || entry->value == function); + entry->value = function; +} + + +template +template +v8::Handle CustomArguments::GetReturnValue(Isolate* isolate) { + // Check the ReturnValue. + Object** handle = &this->end()[kReturnValueOffset]; + // Nothing was set, return empty handle as per previous behaviour. + if ((*handle)->IsTheHole()) return v8::Handle(); + return v8::Handle(reinterpret_cast(handle)); +} + + +v8::Handle FunctionCallbackArguments::Call(InvocationCallback f) { + Isolate* isolate = this->isolate(); + void* f_as_void = CallbackTable::FunctionToVoidPtr(f); + bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); + if (new_style) { + FunctionCallback c = reinterpret_cast(f); + FunctionCallbackInfo info(end(), + argv_, + argc_, + is_construct_call_); + c(info); + } else { + v8::Arguments args(end(), + argv_, + argc_, + is_construct_call_); + v8::Handle return_value = f(args); + if (!return_value.IsEmpty()) return return_value; + } + return GetReturnValue(isolate); +} + + +#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \ +v8::Handle PropertyCallbackArguments::Call(OldFunction f) { \ + Isolate* isolate = this->isolate(); \ + void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \ + bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \ + if (new_style) { \ + NewFunction c = reinterpret_cast(f); \ + PropertyCallbackInfo info(end()); \ + c(info); \ + } else { \ + v8::AccessorInfo info(end()); \ + v8::Handle return_value = f(info); \ + if (!return_value.IsEmpty()) return return_value; \ + } \ + return GetReturnValue(isolate); \ +} + +#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \ +v8::Handle PropertyCallbackArguments::Call(OldFunction f, \ + Arg1 arg1) { \ + Isolate* isolate = this->isolate(); \ + void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \ + bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \ + if (new_style) { \ + NewFunction c = reinterpret_cast(f); \ + PropertyCallbackInfo info(end()); \ + c(arg1, info); \ + } else { \ + v8::AccessorInfo info(end()); \ + v8::Handle return_value = f(arg1, info); \ + if (!return_value.IsEmpty()) return return_value; \ + } \ + return GetReturnValue(isolate); \ +} + +#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \ +v8::Handle PropertyCallbackArguments::Call(OldFunction f, \ + Arg1 arg1, \ + Arg2 arg2) { \ + Isolate* isolate = this->isolate(); \ + 
void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \ + bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \ + if (new_style) { \ + NewFunction c = reinterpret_cast(f); \ + PropertyCallbackInfo info(end()); \ + c(arg1, arg2, info); \ + } else { \ + v8::AccessorInfo info(end()); \ + v8::Handle return_value = f(arg1, arg2, info); \ + if (!return_value.IsEmpty()) return return_value; \ + } \ + return GetReturnValue(isolate); \ +} + +#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \ +void PropertyCallbackArguments::Call(OldFunction f, \ + Arg1 arg1, \ + Arg2 arg2) { \ + Isolate* isolate = this->isolate(); \ + void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \ + bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \ + if (new_style) { \ + NewFunction c = reinterpret_cast(f); \ + PropertyCallbackInfo info(end()); \ + c(arg1, arg2, info); \ + } else { \ + v8::AccessorInfo info(end()); \ + f(arg1, arg2, info); \ + } \ +} + +FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0) +FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1) +FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2) +FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID) + +#undef WRITE_CALL_0 +#undef WRITE_CALL_1 +#undef WRITE_CALL_2 +#undef WRITE_CALL_2_VOID + + +} } // namespace v8::internal + diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index 1423d5642..a80b61361 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -82,35 +82,258 @@ class Arguments BASE_EMBEDDED { }; +// mappings from old property callbacks to new ones +// F(old name, new name, return value, parameters...) +// +// These aren't included in the list as they have duplicate signatures +// F(NamedPropertyEnumerator, NamedPropertyEnumeratorCallback, ...) +// F(NamedPropertyGetter, NamedPropertyGetterCallback, ...) 
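(Aside, not from this header: the tables below are X-macro lists, so a single definition of F is stamped out once per mapping. A hypothetical consumer that just counts the one-argument mappings would look like this; CallbackTable's WRITE_REGISTER and the Call wrappers in arguments.cc consume the lists the same way.)

    #define COUNT_MAPPING(OldFunction, NewFunction, Ret, Arg1) +1
    static const int kOneArgMappings =
        0 FOR_EACH_CALLBACK_TABLE_MAPPING_1(COUNT_MAPPING);  // == 6
    #undef COUNT_MAPPING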
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
+  F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback, v8::Array) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
+  F(AccessorGetter, AccessorGetterCallback, v8::Value, v8::Local<String>) \
+  F(NamedPropertyQuery, \
+    NamedPropertyQueryCallback, \
+    v8::Integer, \
+    v8::Local<String>) \
+  F(NamedPropertyDeleter, \
+    NamedPropertyDeleterCallback, \
+    v8::Boolean, \
+    v8::Local<String>) \
+  F(IndexedPropertyGetter, \
+    IndexedPropertyGetterCallback, \
+    v8::Value, \
+    uint32_t) \
+  F(IndexedPropertyQuery, \
+    IndexedPropertyQueryCallback, \
+    v8::Integer, \
+    uint32_t) \
+  F(IndexedPropertyDeleter, \
+    IndexedPropertyDeleterCallback, \
+    v8::Boolean, \
+    uint32_t) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
+  F(NamedPropertySetter, \
+    NamedPropertySetterCallback, \
+    v8::Value, \
+    v8::Local<String>, \
+    v8::Local<Value>) \
+  F(IndexedPropertySetter, \
+    IndexedPropertySetterCallback, \
+    v8::Value, \
+    uint32_t, \
+    v8::Local<Value>) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
+  F(AccessorSetter, \
+    AccessorSetterCallback, \
+    void, \
+    v8::Local<String>, \
+    v8::Local<Value>) \
+
+// All property callbacks as well as invocation callbacks
+#define FOR_EACH_CALLBACK_TABLE_MAPPING(F) \
+  F(InvocationCallback, FunctionCallback) \
+  F(AccessorGetter, AccessorGetterCallback) \
+  F(AccessorSetter, AccessorSetterCallback) \
+  F(NamedPropertySetter, NamedPropertySetterCallback) \
+  F(NamedPropertyQuery, NamedPropertyQueryCallback) \
+  F(NamedPropertyDeleter, NamedPropertyDeleterCallback) \
+  F(IndexedPropertyGetter, IndexedPropertyGetterCallback) \
+  F(IndexedPropertySetter, IndexedPropertySetterCallback) \
+  F(IndexedPropertyQuery, IndexedPropertyQueryCallback) \
+  F(IndexedPropertyDeleter, IndexedPropertyDeleterCallback) \
+  F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback) \
+
+
+// TODO(dcarney): Remove this class when old callbacks are gone.
+class CallbackTable {
+ public:
+  // TODO(dcarney): Flip this when it makes sense for performance.
+  static const bool kStoreVoidFunctions = true;
+  static inline bool ReturnsVoid(Isolate* isolate, void* function) {
+    CallbackTable* table = isolate->callback_table();
+    bool contains =
+        table != NULL &&
+        table->map_.occupancy() != 0 &&
+        table->Contains(function);
+    return contains == kStoreVoidFunctions;
+  }
+
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(AccessorGetterCallback));
+
+  template<typename F>
+  static inline void* FunctionToVoidPtr(F function) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(function));
+  }
+
+#define WRITE_REGISTER(OldFunction, NewFunction)                  \
+  static OldFunction Register(Isolate* isolate, NewFunction f) {  \
+    InsertCallback(isolate, FunctionToVoidPtr(f), true);          \
+    return reinterpret_cast<OldFunction>(f);                      \
+  }                                                               \
+                                                                  \
+  static OldFunction Register(Isolate* isolate, OldFunction f) {  \
+    InsertCallback(isolate, FunctionToVoidPtr(f), false);         \
+    return f;                                                     \
+  }
+  FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
+#undef WRITE_REGISTER
+
+ private:
+  CallbackTable();
+  bool Contains(void* function);
+  static void InsertCallback(Isolate* isolate,
+                             void* function,
+                             bool returns_void);
+  HashMap map_;
+  DISALLOW_COPY_AND_ASSIGN(CallbackTable);
+};
+
+
 // Custom arguments replicate a small segment of stack that can be
 // accessed through an Arguments object the same way the actual stack
 // can.
-class CustomArguments : public Relocatable {
+template<int kArrayLength>
+class CustomArgumentsBase : public Relocatable {
+ public:
+  virtual inline void IterateInstance(ObjectVisitor* v) {
+    v->VisitPointers(values_, values_ + kArrayLength);
+  }
+ protected:
+  inline Object** end() { return values_ + kArrayLength - 1; }
+  explicit inline CustomArgumentsBase(Isolate* isolate)
+      : Relocatable(isolate) {}
+  Object* values_[kArrayLength];
+};
+
+
+template<typename T>
+class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
  public:
-  inline CustomArguments(Isolate* isolate,
-                         Object* data,
-                         Object* self,
-                         JSObject* holder) : Relocatable(isolate) {
-    ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
-    values_[3] = self;
-    values_[2] = holder;
-    values_[1] = data;
-    values_[0] = reinterpret_cast<Object*>(isolate);
+  static const int kReturnValueOffset = T::kReturnValueIndex;
+
+  typedef CustomArgumentsBase<T::kArgsLength> Super;
+  ~CustomArguments() {
+    // TODO(dcarney): create a new zap value for this.
+    this->end()[kReturnValueOffset] =
+        reinterpret_cast<Object*>(kHandleZapValue);
+  }
+
+ protected:
+  explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
+
+  template<typename V>
+  v8::Handle<V> GetReturnValue(Isolate* isolate);
+
+  inline Isolate* isolate() {
+    return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]);
   }
+};
+
+
+class PropertyCallbackArguments
+    : public CustomArguments<PropertyCallbackInfo<Value> > {
+ public:
+  typedef PropertyCallbackInfo<Value> T;
+  typedef CustomArguments<T> Super;
+  static const int kArgsLength = T::kArgsLength;
+  static const int kThisIndex = T::kThisIndex;
+  static const int kHolderIndex = T::kHolderIndex;
+
+  PropertyCallbackArguments(Isolate* isolate,
+                            Object* data,
+                            Object* self,
+                            JSObject* holder)
+      : Super(isolate) {
+    Object** values = this->end();
+    values[T::kThisIndex] = self;
+    values[T::kHolderIndex] = holder;
+    values[T::kDataIndex] = data;
+    values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+    values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+    ASSERT(values[T::kHolderIndex]->IsHeapObject());
+    ASSERT(values[T::kIsolateIndex]->IsSmi());
+  }
+
+  /*
+   * The following Call functions wrap the calling of all callbacks to handle
+   * calling either the old or the new style callbacks depending on which one
+   * has been registered.
+   * For old callbacks which return an empty handle, the ReturnValue is checked
+   * and used if it's been set to anything inside the callback.
+   * New style callbacks always use the return value.
+ */ +#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \ + v8::Handle Call(OldFunction f); \ + +#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \ + v8::Handle Call(OldFunction f, Arg1 arg1); \ + +#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \ + v8::Handle Call(OldFunction f, Arg1 arg1, Arg2 arg2); \ + +#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \ + void Call(OldFunction f, Arg1 arg1, Arg2 arg2); \ + +FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0) +FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1) +FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2) +FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID) + +#undef WRITE_CALL_0 +#undef WRITE_CALL_1 +#undef WRITE_CALL_2 +#undef WRITE_CALL_2_VOID +}; + + +class FunctionCallbackArguments + : public CustomArguments > { + public: + typedef FunctionCallbackInfo T; + typedef CustomArguments Super; + static const int kArgsLength = T::kArgsLength; - inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) { -#ifdef DEBUG - for (size_t i = 0; i < ARRAY_SIZE(values_); i++) { - values_[i] = reinterpret_cast(kZapValue); - } -#endif + FunctionCallbackArguments(internal::Isolate* isolate, + internal::Object* data, + internal::JSFunction* callee, + internal::Object* holder, + internal::Object** argv, + int argc, + bool is_construct_call) + : Super(isolate), + argv_(argv), + argc_(argc), + is_construct_call_(is_construct_call) { + Object** values = end(); + values[T::kDataIndex] = data; + values[T::kCalleeIndex] = callee; + values[T::kHolderIndex] = holder; + values[T::kIsolateIndex] = reinterpret_cast(isolate); + values[T::kReturnValueIndex] = isolate->heap()->the_hole_value(); + ASSERT(values[T::kCalleeIndex]->IsJSFunction()); + ASSERT(values[T::kHolderIndex]->IsHeapObject()); + ASSERT(values[T::kIsolateIndex]->IsSmi()); } - void IterateInstance(ObjectVisitor* v); - Object** end() { return values_ + ARRAY_SIZE(values_) - 1; } + /* + * The following Call function wraps the calling of all callbacks to handle + * calling either the old or the new style callbacks depending on which one + * has been registered. + * For old callbacks which return an empty handle, the ReturnValue is checked + * and used if it's been set to anything inside the callback. + * New style callbacks always use the return value. + */ + v8::Handle Call(InvocationCallback f); private: - Object* values_[4]; + internal::Object** argv_; + int argc_; + bool is_construct_call_; }; diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index b39d9ee12..0102f337b 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -2473,6 +2473,23 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst, } +void Assembler::vcvt_f64_s32(const DwVfpRegister dst, + int fraction_bits, + const Condition cond) { + // Instruction details available in ARM DDI 0406C.b, A8-874. 
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) | + // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0) + ASSERT(fraction_bits > 0 && fraction_bits <= 32); + ASSERT(CpuFeatures::IsSupported(VFP3)); + int vd, d; + dst.split_code(&vd, &d); + int i = ((32 - fraction_bits) >> 4) & 1; + int imm4 = (32 - fraction_bits) & 0xf; + emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 | + vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4); +} + + void Assembler::vneg(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { @@ -3000,7 +3017,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { // Put down constant pool marker "Undefined instruction". // The data size helps disassembly know what to print. - emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker)); + emit(kConstantPoolMarker | + EncodeConstantPoolLength(size_after_marker / kPointerSize)); if (require_64_bit_align) { emit(kConstantPoolMarker); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 0fd518673..3000860ba 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -459,6 +459,17 @@ class Operand BASE_EMBEDDED { // rm shift_imm explicit Operand(Register rm, ShiftOp shift_op, int shift_imm); + INLINE(static Operand SmiUntag(Register rm)) { + return Operand(rm, ASR, kSmiTagSize); + } + INLINE(static Operand PointerOffsetFromSmiKey(Register key)) { + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize); + } + INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) { + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2); + return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize); + } // rm rs explicit Operand(Register rm, ShiftOp shift_op, Register rs); @@ -515,6 +526,12 @@ class MemOperand BASE_EMBEDDED { // [rn], +/- rm shift_imm PostIndex/NegPostIndex explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm, AddrMode am = Offset); + INLINE(static MemOperand PointerAddressFromSmiKey(Register array, + Register key, + AddrMode am = Offset)) { + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am); + } void set_offset(int32_t offset) { ASSERT(rm_.is(no_reg)); @@ -1032,6 +1049,9 @@ class Assembler : public AssemblerBase { const DwVfpRegister src, VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); + void vcvt_f64_s32(const DwVfpRegister dst, + int fraction_bits, + const Condition cond = al); void vneg(const DwVfpRegister dst, const DwVfpRegister src, diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 3cc2797e9..6333924ca 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -215,12 +215,9 @@ static void AllocateJSArray(MacroAssembler* masm, // Allocate the JSArray object together with space for a FixedArray with the // requested number of elements. 
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); __ mov(elements_array_end, Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize)); - __ add(elements_array_end, - elements_array_end, - Operand(array_size, ASR, kSmiTagSize)); + __ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size)); __ Allocate(elements_array_end, result, scratch1, @@ -249,7 +246,6 @@ static void AllocateJSArray(MacroAssembler* masm, FieldMemOperand(result, JSArray::kElementsOffset)); // Clear the heap tag on the elements array. - STATIC_ASSERT(kSmiTag == 0); __ sub(elements_array_storage, elements_array_storage, Operand(kHeapObjectTag)); @@ -261,7 +257,6 @@ static void AllocateJSArray(MacroAssembler* masm, __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex); ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset); __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex)); - STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); __ str(array_size, MemOperand(elements_array_storage, kPointerSize, PostIndex)); @@ -270,10 +265,9 @@ static void AllocateJSArray(MacroAssembler* masm, // result: JSObject // elements_array_storage: elements array element storage // array_size: smi-tagged size of elements array - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); __ add(elements_array_end, elements_array_storage, - Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize)); + Operand::PointerOffsetFromSmiKey(array_size)); // Fill the allocated FixedArray with the hole value if requested. // result: JSObject @@ -335,7 +329,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { __ bind(&argc_one_or_more); __ cmp(r0, Operand(1)); __ b(ne, &argc_two_or_more); - STATIC_ASSERT(kSmiTag == 0); __ ldr(r2, MemOperand(sp)); // Get the argument from the stack. __ tst(r2, r2); __ b(ne, ¬_empty_array); @@ -344,6 +337,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { __ b(&empty_array); __ bind(¬_empty_array); + STATIC_ASSERT(kSmiTag == 0); __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC); __ b(ne, call_generic_code); @@ -375,7 +369,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { // Handle construction of an array from a list of arguments. __ bind(&argc_two_or_more); - __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convet argc to a smi. + __ SmiTag(r2, r0); // r0: argc // r1: constructor @@ -478,7 +472,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { if (FLAG_debug_code) { // Initial map for the builtin InternalArray functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ tst(r2, Operand(kSmiTagMask)); + __ SmiTst(r2); __ Assert(ne, "Unexpected initial map for InternalArray function"); __ CompareObjectType(r2, r3, r4, MAP_TYPE); __ Assert(eq, "Unexpected initial map for InternalArray function"); @@ -512,7 +506,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { if (FLAG_debug_code) { // Initial map for the builtin Array functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ tst(r2, Operand(kSmiTagMask)); + __ SmiTst(r2); __ Assert(ne, "Unexpected initial map for Array function"); __ CompareObjectType(r2, r3, r4, MAP_TYPE); __ Assert(eq, "Unexpected initial map for Array function"); @@ -545,7 +539,7 @@ void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { // Array functions which always have a map. 
// Initial map for the builtin Array function should be a map. __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ tst(r3, Operand(kSmiTagMask)); + __ SmiTst(r3); __ Assert(ne, "Unexpected initial map for Array function"); __ CompareObjectType(r3, r3, r4, MAP_TYPE); __ Assert(eq, "Unexpected initial map for Array function"); @@ -778,7 +772,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, FrameScope scope(masm, StackFrame::CONSTRUCT); // Preserve the two incoming parameters on the stack. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ SmiTag(r0); __ push(r0); // Smi-tagged arguments count. __ push(r1); // Constructor function. @@ -931,7 +925,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ SmiTag(r0, r3); __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); // Initialize the fields to undefined. @@ -1004,7 +998,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); // Set up number of arguments for function call below - __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + __ SmiUntag(r0, r3); // Copy arguments and receiver to the expression stack. // r0: number of arguments @@ -1340,6 +1334,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } +void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); +} + + void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); } @@ -1454,7 +1453,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { { // Enter an internal frame in order to preserve argument count. FrameScope scope(masm, StackFrame::INTERNAL); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. + __ SmiTag(r0); __ push(r0); __ push(r2); @@ -1462,7 +1461,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ mov(r2, r0); __ pop(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ SmiUntag(r0); // Exit the internal frame. } @@ -1565,7 +1564,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); __ ldr(r2, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); - __ mov(r2, Operand(r2, ASR, kSmiTagSize)); + __ SmiUntag(r2); __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); __ SetCallKind(r5, CALL_AS_METHOD); __ cmp(r2, r0); // Check formal and actual parameter counts. @@ -1604,7 +1603,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // here which will cause r2 to become negative. __ sub(r2, sp, r2); // Check if the arguments will overflow the stack. - __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0)); __ b(gt, &okay); // Signed comparison. // Out of stack space. @@ -1714,7 +1713,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Invoke the function. 
Label call_proxy; ParameterCount actual(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ SmiUntag(r0); __ ldr(r1, MemOperand(fp, kFunctionOffset)); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); __ b(ne, &call_proxy); @@ -1743,7 +1742,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ SmiTag(r0); __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit()); __ add(fp, sp, Operand(3 * kPointerSize)); @@ -1759,7 +1758,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { __ ldr(r1, MemOperand(fp, -3 * kPointerSize)); __ mov(sp, fp); __ ldm(ia_w, sp, fp.bit() | lr.bit()); - __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1)); __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver } @@ -1790,7 +1789,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // r1: function // r2: expected number of arguments // r3: code entry to call - __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0)); // adjust for return address and receiver __ add(r0, r0, Operand(2 * kPointerSize)); __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2)); @@ -1821,7 +1820,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // r1: function // r2: expected number of arguments // r3: code entry to call - __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0)); // Copy the arguments (including the receiver) to the new stack frame. // r0: copy start address diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 86da76ac3..c667c9072 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -307,8 +307,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { // The optimized code map must never be empty, so check the first elements. Label install_optimized; // Speculatively move code object into r4. - __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize)); - __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize)); + __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); + __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); __ cmp(r2, r5); __ b(eq, &install_optimized); @@ -317,19 +317,17 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); __ bind(&loop); // Do not double check first entry. - - __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); + __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); __ b(eq, &install_unoptimized); - __ sub(r4, r4, Operand( - Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry. + __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); __ ldr(r5, MemOperand(r5)); __ cmp(r2, r5); __ b(ne, &loop); // Hit: fetch the optimized code. 
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); __ add(r5, r5, Operand(kPointerSize)); __ ldr(r4, MemOperand(r5)); @@ -521,8 +519,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { Register mantissa = result2_; Label not_special; - // Convert from Smi to integer. - __ mov(source_, Operand(source_, ASR, kSmiTagSize)); + __ SmiUntag(source_); // Move sign bit from source to destination. This works because the sign bit // in the exponent word of the double has the same position and polarity as // the 2's complement sign bit in a Smi. @@ -772,7 +769,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Lhs is a smi, rhs is a number. // Convert lhs to a double in d7. - __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); + __ SmiToDouble(d7, lhs); // Load the double from rhs, tagged HeapNumber r0, to d6. __ sub(r7, rhs, Operand(kHeapObjectTag)); __ vldr(d6, r7, HeapNumber::kValueOffset); @@ -803,7 +800,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ sub(r7, lhs, Operand(kHeapObjectTag)); __ vldr(d7, r7, HeapNumber::kValueOffset); // Convert rhs to a double in d6 . - __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); + __ SmiToDouble(d6, rhs); // Fall through to both_loaded_as_doubles. } @@ -1230,7 +1227,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { if (types_.Contains(SMI)) { // Smis: 0 -> false, all other -> true - __ tst(tos_, Operand(kSmiTagMask)); + __ SmiTst(tos_); // tos_ contains the correct return value already __ Ret(eq); } else if (types_.NeedsMap()) { @@ -1535,7 +1532,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ b(mi, &try_float); // Tag the result as a smi and we're done. - __ mov(r0, Operand(r1, LSL, kSmiTagSize)); + __ SmiTag(r0, r1); __ Ret(); // Try to store the result in a heap number. @@ -1882,9 +1879,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, __ GetLeastBitsFromSmi(scratch2, right, 5); __ mov(scratch1, Operand(scratch1, LSL, scratch2)); // Check that the signed result fits in a Smi. - __ add(scratch2, scratch1, Operand(0x40000000), SetCC); - __ b(mi, ¬_smi_result); - __ SmiTag(right, scratch1); + __ TrySmiTag(right, scratch1, ¬_smi_result); __ Ret(); break; default: @@ -1946,12 +1941,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Load left and right operands into d0 and d1. if (smi_operands) { - __ SmiUntag(scratch1, right); - __ vmov(d1.high(), scratch1); - __ vcvt_f64_s32(d1, d1.high()); - __ SmiUntag(scratch1, left); - __ vmov(d0.high(), scratch1); - __ vcvt_f64_s32(d0, d0.high()); + __ SmiToDouble(d1, right); + __ SmiToDouble(d0, left); } else { // Load right operand into d1. if (right_type == BinaryOpIC::INT32) { @@ -2062,9 +2053,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, } // Check that the *signed* result fits in a smi. - __ add(r3, r2, Operand(0x40000000), SetCC); - __ b(mi, &result_not_a_smi); - __ SmiTag(r0, r2); + __ TrySmiTag(r0, r2, &result_not_a_smi); __ Ret(); // Allocate new heap number for result. @@ -2124,7 +2113,6 @@ void BinaryOpStub_GenerateSmiCode( // Perform combined smi check on both operands. __ orr(scratch1, left, Operand(right)); - STATIC_ASSERT(kSmiTag == 0); __ JumpIfNotSmi(scratch1, ¬_smis); // If the smi-smi operation results in a smi return is generated. 
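(Aside: the combined smi check just above, one orr followed by a single JumpIfNotSmi, works because of the tag layout this file keeps asserting: kSmiTag == 0, so a smi has a clear low bit and a heap pointer a set one. A minimal standalone restatement in plain C++, not taken from the patch:)

    #include <cassert>
    #include <stdint.h>

    static const uint32_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

    static bool BothSmi(uint32_t a, uint32_t b) {
      // One OR plus one tag test replaces two separate smi checks.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_a = 21u << 1;         // smi-tagged 21
      uint32_t smi_b = 2u << 1;          // smi-tagged 2
      uint32_t tagged_ptr = smi_a | 1u;  // heap pointers have the low bit set
      assert(BothSmi(smi_a, smi_b));
      assert(!BothSmi(smi_a, tagged_ptr));
      return 0;
    }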
@@ -2162,8 +2150,12 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); + } + __ Ret(); } @@ -2188,7 +2180,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + StringAddStub string_add_stub((StringAddFlags) + (ERECT_FRAME | NO_STRING_CHECK_IN_STUB)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_stub); @@ -2408,12 +2401,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { UNREACHABLE(); } - // Check if the result fits in a smi. - __ add(scratch1, r2, Operand(0x40000000), SetCC); - // If not try to return a heap number. (We know the result is an int32.) - __ b(mi, &return_heap_number); - // Tag the result and return. - __ SmiTag(r0, r2); + // Check if the result fits in a smi. If not try to return a heap number. + // (We know the result is an int32). + __ TrySmiTag(r0, r2, &return_heap_number); __ Ret(); __ bind(&return_heap_number); @@ -2459,8 +2449,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); + } + __ Ret(); } @@ -2507,8 +2501,12 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); + } + __ Ret(); } @@ -2531,8 +2529,12 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); + } + __ Ret(); } @@ -2548,7 +2550,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &left_not_string); - StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + StringAddStub string_add_left_stub((StringAddFlags) + (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_left_stub); @@ -2558,7 +2561,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); + StringAddStub string_add_right_stub((StringAddFlags) + (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_right_stub); @@ -2627,7 +2631,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Input is a smi. Convert to double and load the low and high words // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ SmiToDouble(d7, r0); + __ vmov(r2, r3, d7); __ b(&loaded); __ bind(&input_not_smi); @@ -3825,7 +3830,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // Read the argument from the stack and return it. 
__ sub(r3, r0, r1); - __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3)); __ ldr(r0, MemOperand(r3, kDisplacement)); __ Jump(lr); @@ -3839,7 +3844,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // Read the argument from the adaptor frame and return it. __ sub(r3, r0, r1); - __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3)); __ ldr(r0, MemOperand(r3, kDisplacement)); __ Jump(lr); @@ -4092,7 +4097,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ bind(&adaptor_frame); __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ str(r1, MemOperand(sp, 0)); - __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1)); __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); __ str(r3, MemOperand(sp, 1 * kPointerSize)); @@ -4100,9 +4105,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // of the arguments object and the elements array in words. Label add_arguments_object; __ bind(&try_allocate); - __ cmp(r1, Operand::Zero()); + __ SmiUntag(r1, SetCC); __ b(eq, &add_arguments_object); - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); __ bind(&add_arguments_object); __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); @@ -4141,8 +4145,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // Untag the length for the loop. - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); + __ SmiUntag(r1); // Copy the fixed array slots. Label loop; @@ -4211,7 +4214,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the first argument is a JSRegExp object. __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); - STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(r0, &runtime); __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); __ b(ne, &runtime); @@ -4219,7 +4221,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the RegExp has been compiled (data contains a fixed array). __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); if (FLAG_debug_code) { - __ tst(regexp_data, Operand(kSmiTagMask)); + __ SmiTst(regexp_data); __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); @@ -4324,7 +4326,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); __ cmp(r3, Operand(r1)); __ b(ls, &runtime); - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); + __ SmiUntag(r1); STATIC_ASSERT(4 == kOneByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); @@ -4399,7 +4401,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ add(r2, r9, Operand(r1, LSL, r3)); __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); - __ mov(r8, Operand(r8, ASR, kSmiTagSize)); + __ SmiUntag(r8); __ add(r3, r9, Operand(r8, LSL, r3)); // Argument 2 (r1): Previous index. 
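(Aside: the Operand::SmiUntag and Operand::PointerOffsetFromSmiKey rewrites in these hunks rely on a shift identity: untagging with ASR kSmiTagSize and then scaling with LSL kPointerSizeLog2 equals a single LSL by the difference, given the STATIC_ASSERTs kSmiTag == 0 and kSmiTagSize < kPointerSizeLog2. A standalone sketch with the 32-bit constants assumed by this port:)

    #include <cassert>
    #include <stdint.h>

    static const int kSmiTagSize = 1;       // one tag bit on 32-bit V8
    static const int kPointerSizeLog2 = 2;  // 4-byte pointers

    // (key >> kSmiTagSize) << kPointerSizeLog2, folded into a single shift.
    static int32_t PointerOffsetFromSmiKey(int32_t smi_key) {
      return smi_key << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      int32_t key = 7 << kSmiTagSize;                 // smi-tagged index 7
      assert(PointerOffsetFromSmiKey(key) == 7 * 4);  // byte offset into array
      return 0;
    }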
@@ -4486,13 +4488,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(r0, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); - __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); + __ cmp(r2, Operand::SmiUntag(r0)); __ b(gt, &runtime); // r1: number of capture registers // r4: subject string // Store the capture count. - __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. + __ SmiTag(r2, r1); __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. @@ -4536,7 +4538,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Read the value from the static offsets vector buffer. __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); // Store the smi value in the last match info. - __ mov(r3, Operand(r3, LSL, kSmiTagSize)); + __ SmiTag(r3); __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); __ jmp(&next_capture); __ bind(&done); @@ -4584,7 +4586,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // (9) Sliced string. Replace subject with parent. Go to (4). // Load offset into r9 and replace subject string with parent. __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); - __ mov(r9, Operand(r9, ASR, kSmiTagSize)); + __ SmiUntag(r9); __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); __ jmp(&check_underlying); // Go to (4). #endif // V8_INTERPRETED_REGEXP @@ -4611,7 +4613,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // FixedArray. int objects_size = (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; - __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); + __ SmiUntag(r5, r1); __ add(r2, r5, Operand(objects_size)); __ Allocate( r2, // In: Size, in words. @@ -4654,7 +4656,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { __ mov(r2, Operand(factory->fixed_array_map())); __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); // Set FixedArray length. - __ mov(r6, Operand(r5, LSL, kSmiTagSize)); + __ SmiTag(r6, r5); __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); // Fill contents of fixed-array with undefined. __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); @@ -4771,6 +4773,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { __ bind(&megamorphic); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + __ jmp(&done); // An uninitialized cache is patched with the function or sentinel to // indicate the ElementsKind if function is the Array constructor. @@ -4970,7 +4973,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { __ cmp(ip, Operand(index_)); __ b(ls, index_out_of_range_); - __ mov(index_, Operand(index_, ASR, kSmiTagSize)); + __ SmiUntag(index_); StringCharLoadGenerator::Generate(masm, object_, @@ -4978,7 +4981,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { result_, &call_runtime_); - __ mov(result_, Operand(result_, LSL, kSmiTagSize)); + __ SmiTag(result_); __ bind(&exit_); } @@ -5024,7 +5027,7 @@ void StringCharCodeAtGenerator::GenerateSlow( // is too complex (e.g., when the string needs to be flattened). 
__ bind(&call_runtime_); call_helper.BeforeCall(masm); - __ mov(index_, Operand(index_, LSL, kSmiTagSize)); + __ SmiTag(index_); __ Push(object_, index_); __ CallRuntime(Runtime::kStringCharCodeAt, 2); __ Move(result_, r0); @@ -5050,8 +5053,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); // At this point code register contains smi tagged ASCII char code. - STATIC_ASSERT(kSmiTag == 0); - __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); __ b(eq, &slow_case_); @@ -5476,9 +5478,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); - STATIC_ASSERT(kSmiTag == 0); // Do a JumpIfSmi, but fold its jump into the subsequent string test. - __ tst(r0, Operand(kSmiTagMask)); + __ SmiTst(r0); Condition is_string = masm->IsObjectStringType(r0, r1, ne); ASSERT(is_string == eq); __ b(NegateCondition(is_string), &runtime); @@ -5822,7 +5823,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. - if (flags_ == NO_STRING_ADD_FLAGS) { + if ((flags_ & NO_STRING_ADD_FLAGS) != 0) { __ JumpIfEitherSmi(r0, r1, &call_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -5875,8 +5876,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&strings_not_empty); } - __ mov(r2, Operand(r2, ASR, kSmiTagSize)); - __ mov(r3, Operand(r3, ASR, kSmiTagSize)); + __ SmiUntag(r2); + __ SmiUntag(r3); // Both strings are non-empty. // r0: first string // r1: second string @@ -6114,15 +6115,49 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + if ((flags_ & ERECT_FRAME) != 0) { + GenerateRegisterArgsPop(masm); + // Build a frame + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + __ CallRuntime(Runtime::kStringAdd, 2); + } + __ Ret(); + } else { + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + } if (call_builtin.is_linked()) { __ bind(&call_builtin); - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + if ((flags_ & ERECT_FRAME) != 0) { + GenerateRegisterArgsPop(masm); + // Build a frame + { + FrameScope scope(masm, StackFrame::INTERNAL); + GenerateRegisterArgsPush(masm); + __ InvokeBuiltin(builtin_id, CALL_FUNCTION); + } + __ Ret(); + } else { + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + } } } +void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) { + __ push(r0); + __ push(r1); +} + + +void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) { + __ pop(r1); + __ pop(r0); +} + + void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, int stack_offset, Register arg, @@ -6184,7 +6219,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { } else { // Untag before subtracting to avoid handling overflow. 
__ SmiUntag(r1); - __ sub(r0, r1, SmiUntagOperand(r0)); + __ sub(r0, r1, Operand::SmiUntag(r0)); } __ Ret(); @@ -6218,10 +6253,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ vldr(d1, r2, HeapNumber::kValueOffset); __ b(&left); __ bind(&right_smi); - __ SmiUntag(r2, r0); // Can't clobber r0 yet. - SwVfpRegister single_scratch = d2.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d1, single_scratch); + __ SmiToDouble(d1, r0); __ bind(&left); __ JumpIfSmi(r1, &left_smi); @@ -6231,10 +6263,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ vldr(d0, r2, HeapNumber::kValueOffset); __ b(&done); __ bind(&left_smi); - __ SmiUntag(r2, r1); // Can't clobber r1 yet. - single_scratch = d3.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d0, single_scratch); + __ SmiToDouble(d0, r1); __ bind(&done); // Compare operands. @@ -6645,7 +6674,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, // Compute the capacity mask. __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); - __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int + __ SmiUntag(scratch1); __ sub(scratch1, scratch1, Operand(1)); // Generate an unrolled loop that performs a few probes before @@ -6726,7 +6755,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { Label in_dictionary, maybe_in_dictionary, not_in_dictionary; __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); - __ mov(mask, Operand(mask, ASR, kSmiTagSize)); + __ SmiUntag(mask); __ sub(mask, mask, Operand(1)); __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); @@ -7124,7 +7153,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ str(r0, MemOperand(r6, 0)); // Update the write barrier for the array store. @@ -7136,7 +7165,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // and value is Smi. __ bind(&smi_element); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); __ Ret(); diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 0b1a8b847..863848cc3 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -211,11 +211,13 @@ class StringHelper : public AllStatic { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { - NO_STRING_ADD_FLAGS = 0, + NO_STRING_ADD_FLAGS = 1 << 0, // Omit left string check in stub (left is definitely a string). - NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1, // Omit right string check in stub (right is definitely a string). - NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2, + // Stub needs a frame before calling the runtime + ERECT_FRAME = 1 << 3, // Omit both string checks in stub. 
NO_STRING_CHECK_IN_STUB = NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB @@ -241,6 +243,9 @@ class StringAddStub: public PlatformCodeStub { Register scratch4, Label* slow); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateRegisterArgsPop(MacroAssembler* masm); + const StringAddFlags flags_; }; diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 9d773d4cc..7bf253a33 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -440,7 +440,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Label indirect_string_loaded; __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset)); - __ add(index, index, Operand(result, ASR, kSmiTagSize)); + __ add(index, index, Operand::SmiUntag(result)); __ jmp(&indirect_string_loaded); // Handle cons strings. @@ -510,9 +510,9 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, Register index, Register value) { if (FLAG_debug_code) { - __ tst(index, Operand(kSmiTagMask)); + __ SmiTst(index); __ Check(eq, "Non-smi index"); - __ tst(value, Operand(kSmiTagMask)); + __ SmiTst(value); __ Check(eq, "Non-smi value"); __ ldr(ip, FieldMemOperand(string, String::kLengthOffset)); @@ -540,10 +540,10 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); if (encoding == String::ONE_BYTE_ENCODING) { // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline. - __ strb(value, MemOperand(ip, index, LSR, 1)); + __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize)); } else { // No need to untag a smi for two-byte addressing. - __ strh(value, MemOperand(ip, index)); + __ strh(value, MemOperand(ip, index)); // LSL(1 - kSmiTagSize). } } diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 6bfaf414c..2f0a7c4e5 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -132,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, __ tst(reg, Operand(0xc0000000)); __ Assert(eq, "Unable to encode value as smi"); } - __ mov(reg, Operand(reg, LSL, kSmiTagSize)); + __ SmiTag(reg); } } __ stm(db_w, sp, object_regs | non_object_regs); @@ -154,7 +154,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, int r = JSCallerSavedCode(i); Register reg = { r }; if ((non_object_regs & (1 << r)) != 0) { - __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + __ SmiUntag(reg); } if (FLAG_debug_code && (((object_regs |non_object_regs) & (1 << r)) == 0)) { diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 001d3c830..d973889bb 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -53,14 +53,13 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( ASSERT(function->IsOptimized()); ASSERT(function->FunctionsInFunctionListShareSameCode()); - // The optimized code is going to be patched, so we cannot use it - // any more. Play safe and reset the whole cache. - function->shared()->ClearOptimizedCodeMap(); - // Get the optimized code. Code* code = function->code(); Address code_start_address = code->instruction_start(); + // The optimized code is going to be patched, so we cannot use it any more. 
+ function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function"); + // Invalidate the relocation information, as it will become invalid by the // code patching below, and is not needed any more. code->InvalidateRelocation(); @@ -277,7 +276,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { if (FLAG_trace_osr) { PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ", reinterpret_cast(function_)); - function_->PrintName(); + PrintFunctionName(); PrintF(" => node=%u, frame=%d->%d]\n", ast_id, input_frame_size, @@ -371,189 +370,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() { PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ", ok ? "finished" : "aborted", reinterpret_cast(function_)); - function_->PrintName(); + PrintFunctionName(); PrintF(" => pc=0x%0x]\n", output_[0]->GetPc()); } } -// This code is very similar to ia32 code, but relies on register names (fp, sp) -// and how the frame is laid out. -void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, - int frame_index) { - // Read the ast node id, function, and frame height for this output frame. - BailoutId node_id = BailoutId(iterator->Next()); - JSFunction* function; - if (frame_index != 0) { - function = JSFunction::cast(ComputeLiteral(iterator->Next())); - } else { - int closure_id = iterator->Next(); - USE(closure_id); - ASSERT_EQ(Translation::kSelfLiteralId, closure_id); - function = function_; - } - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (trace_) { - PrintF(" translating "); - function->PrintName(); - PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes); - } - - // The 'fixed' part of the frame consists of the incoming parameters and - // the part described by JavaScriptFrameConstants. - unsigned fixed_frame_size = ComputeFixedSize(function); - unsigned input_frame_size = input_->GetFrameSize(); - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::JAVA_SCRIPT); - - bool is_bottommost = (0 == frame_index); - bool is_topmost = (output_count_ - 1 == frame_index); - ASSERT(frame_index >= 0 && frame_index < output_count_); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address for the bottommost output frame can be computed from - // the input frame pointer and the output frame's height. For all - // subsequent output frames, it can be computed from the previous one's - // top address and the current frame's size. - uint32_t top_address; - if (is_bottommost) { - // 2 = context and function in the frame. - top_address = - input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes; - } else { - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - } - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = function->shared()->formal_parameter_count() + 1; - unsigned output_offset = output_frame_size; - unsigned input_offset = input_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - input_offset -= (parameter_count * kPointerSize); - - // There are no translation commands for the caller's pc and fp, the - // context, and the function. 
Synthesize their values and set them up - // explicitly. - // - // The caller's pc for the bottommost output frame is the same as in the - // input frame. For all subsequent output frames, it can be read from the - // previous one. This frame's pc can be computed from the non-optimized - // function code and AST id of the bailout. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - intptr_t value; - if (is_bottommost) { - value = input_->GetFrameSlot(input_offset); - } else { - value = output_[frame_index - 1]->GetPc(); - } - output_frame->SetFrameSlot(output_offset, value); - if (trace_) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, value); - } - - // The caller's frame pointer for the bottommost output frame is the same - // as in the input frame. For all subsequent output frames, it can be - // read from the previous one. Also compute and set this frame's frame - // pointer. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - if (is_bottommost) { - value = input_->GetFrameSlot(input_offset); - } else { - value = output_[frame_index - 1]->GetFp(); - } - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value); - output_frame->SetFp(fp_value); - if (is_topmost) { - output_frame->SetRegister(fp.code(), fp_value); - } - if (trace_) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // For the bottommost output frame the context can be gotten from the input - // frame. For all subsequent output frames it can be gotten from the function - // so long as we don't inline functions that need local contexts. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - if (is_bottommost) { - value = input_->GetFrameSlot(input_offset); - } else { - value = reinterpret_cast(function->context()); - } - output_frame->SetFrameSlot(output_offset, value); - output_frame->SetContext(value); - if (is_topmost) output_frame->SetRegister(cp.code(), value); - if (trace_) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", - top_address + output_offset, output_offset, value); - } - - // The function was mentioned explicitly in the BEGIN_FRAME. - output_offset -= kPointerSize; - input_offset -= kPointerSize; - value = reinterpret_cast(function); - // The function for the bottommost output frame should also agree with the - // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); - output_frame->SetFrameSlot(output_offset, value); - if (trace_) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", - top_address + output_offset, output_offset, value); - } - - // Translate the rest of the frame. - for (unsigned i = 0; i < height; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - ASSERT(0 == output_offset); - - // Compute this frame's PC, state, and continuation. 
- Code* non_optimized_code = function->shared()->code(); - FixedArray* raw_data = non_optimized_code->deoptimization_data(); - DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data); - Address start = non_optimized_code->instruction_start(); - unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared()); - unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state); - uint32_t pc_value = reinterpret_cast(start + pc_offset); - output_frame->SetPc(pc_value); - if (is_topmost) { - output_frame->SetRegister(pc.code(), pc_value); - } - - FullCodeGenerator::State state = - FullCodeGenerator::StateField::decode(pc_and_state); - output_frame->SetState(Smi::FromInt(state)); - - - // Set the continuation for the topmost frame. - if (is_topmost && bailout_type_ != DEBUGGER) { - Builtins* builtins = isolate_->builtins(); - Code* continuation = (bailout_type_ == EAGER) - ? builtins->builtin(Builtins::kNotifyDeoptimized) - : builtins->builtin(Builtins::kNotifyLazyDeoptimized); - output_frame->SetContinuation( - reinterpret_cast(continuation->entry())); - } -} - - void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { // Set the register values. The values are not important as there are no // callee saved registers in JavaScript frames, so all registers are @@ -597,6 +419,12 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { } +bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { + // There is no dynamic alignment padding on ARM in the input frame. + return false; +} + + #define __ masm()-> // This code tries to be close to ia32 code so that any changes can be @@ -640,7 +468,7 @@ void Deoptimizer::EntryGenerator::Generate() { // Get the address of the location in the code object if possible (r3) (return // address for lazy deoptimization) and compute the fp-to-sp delta in // register r4. - if (type() == EAGER) { + if (type() == EAGER || type() == SOFT) { __ mov(r3, Operand::Zero()); // Correct one word for bailout id. __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); @@ -695,7 +523,7 @@ void Deoptimizer::EntryGenerator::Generate() { // Remove the bailout id, eventually return address, and the saved registers // from the stack. - if (type() == EAGER || type() == OSR) { + if (type() == EAGER || type() == SOFT || type() == OSR) { __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); } else { __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); @@ -814,7 +642,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { for (int i = 0; i < count(); i++) { int start = masm()->pc_offset(); USE(start); - if (type() == EAGER) { + if (type() == EAGER || type() == SOFT) { __ nop(); } else { // Emulate ia32 like call by pushing return address to stack. 
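The hunks above fold the new SOFT bailout type into the EAGER paths of the deoptimizer's entry and table generators. A standalone sketch (not V8 code; the enum and function names are illustrative) of the stack-cleanup rule those branches encode: EAGER, SOFT and OSR entries leave only the bailout id on the stack, while LAZY entries also carry a return address, hence one extra word.

    #include <cassert>

    enum BailoutType { EAGER, LAZY, SOFT, OSR, DEBUGGER };

    // Words to drop beyond the saved-registers area, mirroring the
    // "kSavedRegistersAreaSize + (1|2) * kPointerSize" adjustments above.
    int ExtraWordsOnStack(BailoutType type) {
      return (type == EAGER || type == SOFT || type == OSR) ? 1 : 2;
    }

    int main() {
      assert(ExtraWordsOnStack(SOFT) == ExtraWordsOnStack(EAGER));
      assert(ExtraWordsOnStack(LAZY) == 2);
      return 0;
    }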
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index b84d35535..f55552df5 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -1102,6 +1102,7 @@ int Decoder::DecodeType7(Instruction* instr) { // vmov: Rt = Sn // vcvt: Dd = Sm // vcvt: Sd = Dm +// vcvt.f64.s32 Dd, Dd, #<fbits> // Dd = vabs(Dm) // Dd = vneg(Dm) // Dd = vadd(Dn, Dm) @@ -1138,6 +1139,13 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { DecodeVCVTBetweenDoubleAndSingle(instr); } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) { DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) && + (instr->Bit(8) == 1)) { + // vcvt.f64.s32 Dd, Dd, #<fbits> + int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0)); + Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd"); + out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, + ", #%d", fraction_bits); } else if (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)) { DecodeVCVTBetweenFloatingPointAndInteger(instr); diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 5cbe77afc..f5a7dbd3e 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -45,6 +45,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) { } +Register JavaScriptFrame::fp_register() { return v8::internal::fp; } +Register JavaScriptFrame::context_register() { return cp; } + + Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } Register StubFailureTrampolineFrame::context_register() { return cp; } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 0bc1f48c8..33a499c27 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -175,6 +175,7 @@ void FullCodeGenerator::Generate() { // Adjust FP to point to saved FP. __ add(fp, sp, Operand(2 * kPointerSize)); } + info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); @@ -438,9 +439,11 @@ void FullCodeGenerator::EmitReturnSequence() { PredictableCodeSizeScope predictable(masm_, -1); __ RecordJSReturn(); masm_->mov(sp, fp); + int no_frame_start = masm_->pc_offset(); masm_->ldm(ia_w, sp, fp.bit() | lr.bit()); masm_->add(sp, sp, Operand(sp_delta)); masm_->Jump(lr); + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); } #ifdef DEBUG @@ -1195,7 +1198,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Get the current entry of the array into register r3. __ ldr(r2, MemOperand(sp, 2 * kPointerSize)); __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0)); // Get the expected map from the stack or a smi in the // permanent slow case into register r2. @@ -1961,8 +1964,102 @@ void FullCodeGenerator::VisitYield(Yield* expr) { break; } - case Yield::DELEGATING: - UNIMPLEMENTED(); + case Yield::DELEGATING: { + VisitForStackValue(expr->generator_object()); + + // Initial stack layout is as follows: + // [sp + 1 * kPointerSize] iter + // [sp + 0 * kPointerSize] g + + Label l_catch, l_try, l_resume, l_send, l_call, l_loop; + // Initial send value is undefined.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ b(&l_send); + + // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; } + __ bind(&l_catch); + handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); + __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter + __ push(r3); // iter + __ push(r0); // exception + __ mov(r0, r3); // iter + __ push(r0); // push LoadIC state + __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw" + Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize(); + CallIC(throw_ic); // iter.throw in r0 + __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state + __ jmp(&l_call); + + // try { received = yield result.value } + __ bind(&l_try); + __ pop(r0); // result.value + __ PushTryHandler(StackHandler::CATCH, expr->index()); + const int handler_size = StackHandlerConstants::kSize; + __ push(r0); // result.value + __ ldr(r3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size)); // g + __ push(r3); // g + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ ldr(context_register(), + MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); + __ b(ne, &l_resume); + EmitReturnIteratorResult(false); + __ bind(&l_resume); // received in r0 + __ PopTryHandler(); + + // receiver = iter; f = iter.send; arg = received; + __ bind(&l_send); + __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter + __ push(r3); // iter + __ push(r0); // received + __ mov(r0, r3); // iter + __ push(r0); // push LoadIC state + __ LoadRoot(r2, Heap::ksend_stringRootIndex); // "send" + Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize(); + CallIC(send_ic); // iter.send in r0 + __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state + + // result = f.call(receiver, arg); + __ bind(&l_call); + Label l_call_runtime; + __ JumpIfSmi(r0, &l_call_runtime); + __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); + __ b(ne, &l_call_runtime); + __ mov(r1, r0); + ParameterCount count(1); + __ InvokeFunction(r1, count, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ jmp(&l_loop); + __ bind(&l_call_runtime); + __ push(r0); + __ CallRuntime(Runtime::kCall, 3); + + // val = result.value; if (!result.done) goto l_try; + __ bind(&l_loop); + // result.value + __ push(r0); // save result + __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value" + Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize(); + CallIC(value_ic); // result.value in r0 + __ pop(r1); // result + __ push(r0); // result.value + __ mov(r0, r1); // result + __ push(r0); // push LoadIC state + __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done" + Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize(); + CallIC(done_ic); // result.done in r0 + __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state + ToBooleanStub stub(r0); + __ CallStub(&stub); + __ cmp(r0, Operand(0)); + __ b(eq, &l_try); + + // result.value + __ pop(r0); // result.value + context()->DropAndPlug(2, r0); // drop iter and g + break; + } } } @@ -2166,23 +2263,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) { case Token::SAR: - __ b(&stub_call); __ GetLeastBitsFromSmi(scratch1, right, 5); __ mov(right, Operand(left, ASR, scratch1)); __ bic(right, right, Operand(kSmiTagMask)); break; case Token::SHL: { - __ b(&stub_call); __ SmiUntag(scratch1, left); __ GetLeastBitsFromSmi(scratch2, right, 5); __ mov(scratch1, Operand(scratch1, LSL, scratch2)); - __ add(scratch2, scratch1, Operand(0x40000000), SetCC); - __ b(mi, &stub_call); - __ SmiTag(right, scratch1); + __ TrySmiTag(right, scratch1, &stub_call); break; } case Token::SHR: { - __ b(&stub_call); __ SmiUntag(scratch1, left); __ GetLeastBitsFromSmi(scratch2, right, 5); __ mov(scratch1, Operand(scratch1, LSR, scratch2)); @@ -2761,7 +2853,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { &if_true, &if_false, &fall_through); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - __ tst(r0, Operand(kSmiTagMask)); + __ SmiTst(r0); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2782,7 +2874,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { &if_true, &if_false, &fall_through); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - __ tst(r0, Operand(kSmiTagMask | 0x80000000)); + __ NonNegativeSmiTst(r0); Split(eq, if_true, if_false, fall_through); context()->Plug(if_true, if_false); @@ -2909,16 +3001,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ LoadInstanceDescriptors(r1, r4); // r4: descriptor array. // r3: valid entries in the descriptor array. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kPointerSize == 4); __ mov(ip, Operand(DescriptorArray::kDescriptorSize)); __ mul(r3, r3, ip); // Calculate location of the first key name. __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); // Calculate the end of the descriptor array. __ mov(r2, r4); - __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3)); // Loop through all the keys in the descriptor array. If one of these is the // string "valueOf" the result is false. @@ -3686,12 +3775,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { Label done, not_found; // tmp now holds finger offset as a smi. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset)); // r2 now holds finger offset as a smi. __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); // r3 now points to the start of fixed array elements. - __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex)); + __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex)); // Note side effect of PreIndex: r3 now points to the key of the pair. __ cmp(key, r2); __ b(ne, &not_found); @@ -4654,9 +4742,7 @@ void FullCodeGenerator::EnterFinallyBlock() { __ push(result_register()); // Cook return address in link register to stack (smi encoded Code* delta) __ sub(r1, lr, Operand(masm_->CodeObject())); - ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); - STATIC_ASSERT(kSmiTag == 0); - __ add(r1, r1, Operand(r1)); // Convert to smi. + __ SmiTag(r1); // Store result register while executing finally block. __ push(r1); @@ -4710,8 +4796,7 @@ void FullCodeGenerator::ExitFinallyBlock() { // Uncook return address and return. __ pop(result_register()); - ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); - __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ SmiUntag(r1); __ add(pc, r1, Operand(masm_->CodeObject())); } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index c644be59d..14c4794f4 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -290,10 +290,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, __ b(hs, out_of_range); // Fast case: Do the load. __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - // The key is a smi. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ ldr(scratch2, - MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(scratch2, ip); // In case the loaded value is the_hole we have to consult GetProperty @@ -567,7 +564,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r3, ip); __ b(ne, &slow_load); - __ mov(r0, Operand(r2, ASR, kSmiTagSize)); + __ SmiUntag(r0, r2); // r0: untagged index __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5); __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3); @@ -960,7 +957,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r3, ip); __ b(ne, &slow); - __ mov(r2, Operand(r0, ASR, kSmiTagSize)); + __ SmiUntag(r2, r0); __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5); __ Ret(); @@ -1133,7 +1130,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { __ JumpIfSmi(r1, &slow); // Check that the key is an array index, that is Uint32. - __ tst(r0, Operand(kSmiTagMask | kSmiSignMask)); + __ NonNegativeSmiTst(r0); __ b(ne, &slow); // Get the map of the receiver. @@ -1194,7 +1191,7 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. ExternalReference ref = - ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); + ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate()); __ TailCallExternalReference(ref, 3, 1); } @@ -1321,8 +1318,7 @@ static void KeyedStoreGenerateGenericHelper( } // It's irrelevant whether array is smi-only or not when writing a smi. __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); + __ str(value, MemOperand::PointerAddressFromSmiKey(address, key)); __ Ret(); __ bind(&non_smi_value); @@ -1338,7 +1334,7 @@ static void KeyedStoreGenerateGenericHelper( __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(address, address, Operand::PointerOffsetFromSmiKey(key)); __ str(value, MemOperand(address)); // Update write barrier for the elements array address. __ mov(scratch_value, value); // Preserve the value which is returned. 
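All of the ic-arm.cc rewrites above replace an explicit "untag, then scale by kPointerSize" shift pair with MemOperand::PointerAddressFromSmiKey. The underlying identity: a smi is the integer shifted left by kSmiTagSize, so shifting the tagged key by kPointerSizeLog2 - kSmiTagSize lands on the same address in a single barrel-shifter operand. A minimal standalone model, assuming the 32-bit constants (kSmiTagSize == 1, 4-byte pointers); this is an illustration, not V8 code:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;       // tag bit 0 == 0, 31-bit payload
    const int kPointerSizeLog2 = 2;  // 32-bit pointers

    intptr_t SmiTag(int32_t v) { return static_cast<intptr_t>(v) << kSmiTagSize; }

    // What PointerAddressFromSmiKey(base, key) computes: untag and scale, fused.
    intptr_t PointerAddressFromSmiKey(intptr_t base, intptr_t smi_key) {
      return base + (smi_key << (kPointerSizeLog2 - kSmiTagSize));
    }

    int main() {
      const intptr_t base = 0x1000;
      const int32_t index = 7;
      assert(PointerAddressFromSmiKey(base, SmiTag(index)) == base + index * 4);
      return 0;
    }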
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 3fe46ffd7..e1bb69eac 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -217,15 +217,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNilAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if "); - value()->PrintTo(stream); - stream->Add(kind() == kStrictEquality ? " === " : " == "); - stream->Add(nil() == kNullValue ? "null" : "undefined"); - stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); -} - - void LIsObjectAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_object("); value()->PrintTo(stream); @@ -989,6 +980,10 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { } +LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { + return new(zone()) LDebugBreak(); +} + LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { ASSERT(instr->value()->representation().IsTagged()); @@ -1459,7 +1454,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) { } if (instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanBeDivByZero)) { + instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kCanOverflow)) { return AssignEnvironment(DefineAsRegister(mod)); } else { return DefineAsRegister(mod); @@ -1718,12 +1714,6 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); - return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value())); -} - - LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); @@ -1836,7 +1826,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { LOperand* string = UseRegister(instr->string()); LOperand* index = UseRegister(instr->index()); - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseTempRegister(instr->value()); LSeqStringSetChar* result = new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); return DefineAsRegister(result); @@ -2333,7 +2323,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LOperand* temp = needs_write_barrier_for_map ? 
TempRegister() : NULL; LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp); - if (FLAG_track_fields && instr->field_representation().IsSmi()) { + if ((FLAG_track_fields && instr->field_representation().IsSmi()) || + (FLAG_track_heap_object_fields && + instr->field_representation().IsHeapObject())) { return AssignEnvironment(result); } return result; @@ -2398,16 +2390,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { } -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { - return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr); -} - - -LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { - return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr); -} - - LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 116d57621..9bcd44ae0 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -56,7 +56,6 @@ class LCodeGen; V(ArgumentsLength) \ V(ArithmeticD) \ V(ArithmeticT) \ - V(ArrayLiteral) \ V(BitI) \ V(BitNotI) \ V(BoundsCheck) \ @@ -90,6 +89,7 @@ class LCodeGen; V(ConstantI) \ V(ConstantT) \ V(Context) \ + V(DebugBreak) \ V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ @@ -114,7 +114,6 @@ class LCodeGen; V(Uint32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsStringAndBranch) \ V(IsSmiAndBranch) \ @@ -152,7 +151,6 @@ class LCodeGen; V(NumberTagI) \ V(NumberTagU) \ V(NumberUntagD) \ - V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -698,6 +696,12 @@ class LMultiplySubD: public LTemplateInstruction<1, 3, 0> { }; +class LDebugBreak: public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") +}; + + class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -887,24 +891,6 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNilAndBranch: public LControlInstruction<1, 0> { - public: - explicit LIsNilAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - - EqualityKind kind() const { return hydrogen()->kind(); } - NilValue nil() const { return hydrogen()->nil(); } - - virtual void PrintDataTo(StringStream* stream); -}; - - class LIsObjectAndBranch: public LControlInstruction<1, 1> { public: LIsObjectAndBranch(LOperand* value, LOperand* temp) { @@ -2462,20 +2448,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> { }; -class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) -}; - - -class LObjectLiteral: public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) -}; - - class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 3a0f476b5..09a0e9c06 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -95,6 
+95,12 @@ void LCodeGen::FinishCode(Handle code) { transition_maps_.at(i)->AddDependentCode( DependentCode::kTransitionGroup, code); } + if (graph()->depends_on_empty_array_proto_elements()) { + isolate()->initial_object_prototype()->map()->AddDependentCode( + DependentCode::kElementsCantBeAddedGroup, code); + isolate()->initial_array_prototype()->map()->AddDependentCode( + DependentCode::kElementsCantBeAddedGroup, code); + } } @@ -354,9 +360,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { for (int i = 0; i < deopt_jump_table_.length(); i++) { __ bind(&deopt_jump_table_[i].label); Address entry = deopt_jump_table_[i].address; - bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt; - Deoptimizer::BailoutType type = - is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER; + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); if (id == Deoptimizer::kNotDeoptimizationEntry) { Comment(";;; jump table entry %d.", i); @@ -365,7 +369,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { } if (deopt_jump_table_[i].needs_frame) { __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); - if (is_lazy_deopt) { + if (type == Deoptimizer::LAZY) { if (needs_frame_is_call.is_bound()) { __ b(&needs_frame_is_call); } else { @@ -398,7 +402,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { } } } else { - if (is_lazy_deopt) { + if (type == Deoptimizer::LAZY) { __ mov(lr, Operand(pc), LeaveCC, al); __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); } else { @@ -827,14 +831,13 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, } -void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { +void LCodeGen::DeoptimizeIf(Condition cc, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); ASSERT(info()->IsOptimizing() || info()->IsStub()); - Deoptimizer::BailoutType bailout_type = info()->IsStub() - ? Deoptimizer::LAZY - : Deoptimizer::EAGER; Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { @@ -867,9 +870,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { // jump entry if this is the case. if (deopt_jump_table_.is_empty() || (deopt_jump_table_.last().address != entry) || - (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || + (deopt_jump_table_.last().bailout_type != bailout_type) || (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { - JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + Deoptimizer::JumpTableEntry table_entry(entry, + bailout_type, + !frame_is_built_); deopt_jump_table_.Add(table_entry, zone()); } __ b(cc, &deopt_jump_table_.last().label); @@ -877,6 +882,21 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { } +void LCodeGen::DeoptimizeIf(Condition cc, + LEnvironment* environment) { + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? 
Deoptimizer::LAZY + : Deoptimizer::EAGER; + DeoptimizeIf(cc, environment, bailout_type); +} + + +void LCodeGen::SoftDeoptimize(LEnvironment* environment) { + ASSERT(!info()->IsStub()); + DeoptimizeIf(al, environment, Deoptimizer::SOFT); +} + + void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle code) { ZoneList > maps(1, zone()); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); @@ -1428,7 +1448,6 @@ void LCodeGen::DoDivI(LDivI* instr) { const Register left = ToRegister(instr->left()); const Register right = ToRegister(instr->right()); - const Register scratch = scratch0(); const Register result = ToRegister(instr->result()); // Check for x / 0. @@ -1477,8 +1496,8 @@ void LCodeGen::DoDivI(LDivI* instr) { // to be tagged to Smis. If that is not possible, deoptimize. DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr); - __ TrySmiTag(left, &deoptimize, scratch); - __ TrySmiTag(right, &deoptimize, scratch); + __ TrySmiTag(left, &deoptimize); + __ TrySmiTag(right, &deoptimize); __ b(al, deferred->entry()); __ bind(deferred->exit()); @@ -1930,7 +1949,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) { Label done; // If the object is a smi return the object. - __ tst(input, Operand(kSmiTagMask)); + __ SmiTst(input); __ Move(result, input, eq); __ b(eq, &done); @@ -1955,7 +1974,7 @@ void LCodeGen::DoDateField(LDateField* instr) { ASSERT(!scratch.is(scratch0())); ASSERT(!scratch.is(object)); - __ tst(object, Operand(kSmiTagMask)); + __ SmiTst(object); DeoptimizeIf(eq, instr->environment()); __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); DeoptimizeIf(ne, instr->environment()); @@ -2178,6 +2197,11 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { } +void LCodeGen::DoDebugBreak(LDebugBreak* instr) { + __ stop("LBreak"); +} + + void LCodeGen::DoBranch(LBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2236,7 +2260,7 @@ void LCodeGen::DoBranch(LBranch* instr) { __ JumpIfSmi(reg, true_label); } else if (expected.NeedsMap()) { // If we need a map later and have a Smi -> deopt. - __ tst(reg, Operand(kSmiTagMask)); + __ SmiTst(reg); DeoptimizeIf(eq, instr->environment()); } @@ -2399,48 +2423,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { - Register scratch = scratch0(); - Register reg = ToRegister(instr->value()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - // If the expression is known to be untagged or a smi, then it's definitely - // not null, and it can't be a an undetectable object. - if (instr->hydrogen()->representation().IsSpecialization() || - instr->hydrogen()->type().IsSmi()) { - EmitGoto(false_block); - return; - } - - int true_block = chunk_->LookupDestination(instr->true_block_id()); - Heap::RootListIndex nil_value = instr->nil() == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(ip, nil_value); - __ cmp(reg, ip); - if (instr->kind() == kStrictEquality) { - EmitBranch(true_block, false_block, eq); - } else { - Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? 
- Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; - Label* true_label = chunk_->GetAssemblyLabel(true_block); - Label* false_label = chunk_->GetAssemblyLabel(false_block); - __ b(eq, true_label); - __ LoadRoot(ip, other_nil_value); - __ cmp(reg, ip); - __ b(eq, true_label); - __ JumpIfSmi(reg, false_label); - // Check for undetectable objects by looking in the bit field in - // the map. The object has already been smi checked. - __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ tst(scratch, Operand(1 << Map::kIsUndetectable)); - EmitBranch(true_block, false_block, ne); - } -} - - Condition LCodeGen::EmitIsObject(Register input, Register temp1, Label* is_not_object, @@ -2514,7 +2496,7 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { int false_block = chunk_->LookupDestination(instr->false_block_id()); Register input_reg = EmitLoadRegister(instr->value(), ip); - __ tst(input_reg, Operand(kSmiTagMask)); + __ SmiTst(input_reg); EmitBranch(true_block, false_block, eq); } @@ -2920,9 +2902,11 @@ void LCodeGen::DoReturn(LReturn* instr) { count++; } } + int no_frame_start = -1; if (NeedsEagerFrame()) { __ mov(sp, fp); __ ldm(ia_w, sp, fp.bit() | lr.bit()); + no_frame_start = masm_->pc_offset(); } if (instr->has_constant_parameter_count()) { int parameter_count = ToInteger32(instr->constant_parameter_count()); @@ -2938,6 +2922,10 @@ void LCodeGen::DoReturn(LReturn* instr) { } __ Jump(lr); + + if (no_frame_start != -1) { + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); + } } @@ -3379,8 +3367,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); } @@ -3391,7 +3378,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ tst(result, Operand(kSmiTagMask)); + __ SmiTst(result); DeoptimizeIf(ne, instr->environment()); } else { __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); @@ -3534,7 +3521,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { __ b(eq, &global_object); // Deoptimize if the receiver is not a JS object. 
- __ tst(receiver, Operand(kSmiTagMask)); + __ SmiTst(receiver); DeoptimizeIf(eq, instr->environment()); __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); DeoptimizeIf(lt, instr->environment()); @@ -4229,6 +4216,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (!instr->hydrogen()->value()->range()->IsInSmiRange()) { DeoptimizeIf(vs, instr->environment()); } + } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { + Register value = ToRegister(instr->value()); + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + __ SmiTst(value); + DeoptimizeIf(eq, instr->environment()); + } } else if (FLAG_track_double_fields && representation.IsDouble()) { ASSERT(transition.is_null()); ASSERT(instr->is_in_object()); @@ -4463,8 +4456,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); } @@ -5149,14 +5141,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { void LCodeGen::DoCheckSmi(LCheckSmi* instr) { LOperand* input = instr->value(); - __ tst(ToRegister(input), Operand(kSmiTagMask)); + __ SmiTst(ToRegister(input)); DeoptimizeIf(ne, instr->environment()); } void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { LOperand* input = instr->value(); - __ tst(ToRegister(input), Operand(kSmiTagMask)); + __ SmiTst(ToRegister(input)); DeoptimizeIf(eq, instr->environment()); } @@ -5478,92 +5470,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { } -void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { - Handle literals = instr->hydrogen()->literals(); - ElementsKind boilerplate_elements_kind = - instr->hydrogen()->boilerplate_elements_kind(); - AllocationSiteMode allocation_site_mode = - instr->hydrogen()->allocation_site_mode(); - - // Deopt if the array literal boilerplate ElementsKind is of a type different - // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to TERMINAL_FAST_ELEMENTS_KIND. - if (CanTransitionToMoreGeneralFastElementsKind( - boilerplate_elements_kind, true)) { - __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object()); - // Load map into r2. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - // Load the map's "bit field 2". - __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset)); - // Retrieve elements_kind from bit field 2. - __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ cmp(r2, Operand(boilerplate_elements_kind)); - DeoptimizeIf(ne, instr->environment()); - } - - // Set up the parameters to the stub/runtime call. - __ LoadHeapObject(r3, literals); - __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); - // Boilerplate already exists, constant elements are never accessed. - // Pass an empty fixed array. - __ mov(r1, Operand(isolate()->factory()->empty_fixed_array())); - - // Pick the right runtime function or stub to call. 
- int length = instr->hydrogen()->length(); - if (instr->hydrogen()->IsCopyOnWrite()) { - ASSERT(instr->hydrogen()->depth() == 1); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - } else if (instr->hydrogen()->depth() > 1) { - __ Push(r3, r2, r1); - CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); - } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { - __ Push(r3, r2, r1); - CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr); - } else { - FastCloneShallowArrayStub::Mode mode = - boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS - ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - } -} - - -void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { - Handle literals = instr->hydrogen()->literals(); - Handle constant_properties = - instr->hydrogen()->constant_properties(); - - // Set up the parameters to the stub/runtime call. - __ LoadHeapObject(r3, literals); - __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); - __ mov(r1, Operand(constant_properties)); - int flags = instr->hydrogen()->fast_elements() - ? ObjectLiteral::kFastElements - : ObjectLiteral::kNoFlags; - __ mov(r0, Operand(Smi::FromInt(flags))); - - // Pick the right runtime function or stub to call. - int properties_count = instr->hydrogen()->constant_properties_length() / 2; - if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) || - instr->hydrogen()->depth() > 1) { - __ Push(r3, r2, r1, r0); - CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); - } else if (flags != ObjectLiteral::kFastElements || - properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { - __ Push(r3, r2, r1, r0); - CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); - } else { - FastCloneShallowObjectStub stub(properties_count); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - } -} - - void LCodeGen::DoToFastProperties(LToFastProperties* instr) { ASSERT(ToRegister(instr->value()).is(r0)); __ push(r0); @@ -5796,7 +5702,11 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - DeoptimizeIf(al, instr->environment()); + if (instr->hydrogen_value()->IsSoftDeoptimize()) { + SoftDeoptimize(instr->environment()); + } else { + DeoptimizeIf(al, instr->environment()); + } } @@ -5917,7 +5827,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { __ cmp(r0, null_value); DeoptimizeIf(eq, instr->environment()); - __ tst(r0, Operand(kSmiTagMask)); + __ SmiTst(r0); DeoptimizeIf(eq, instr->environment()); STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); @@ -5985,8 +5895,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { __ cmp(index, Operand::Zero()); __ b(lt, &out_of_object); - STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); - __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); __ b(&done); @@ -5994,7 +5903,8 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { __ bind(&out_of_object); __ ldr(result, FieldMemOperand(object, 
JSObject::kPropertiesOffset)); // Index is equal to negated out of object property index plus 1. - __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); __ bind(&done); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 294dcf205..1a34169eb 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -290,7 +290,11 @@ class LCodeGen BASE_EMBEDDED { void RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode); + void DeoptimizeIf(Condition cc, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition cc, LEnvironment* environment); + void SoftDeoptimize(LEnvironment* environment); void AddToTranslation(Translation* translation, LOperand* op, @@ -387,18 +391,6 @@ class LCodeGen BASE_EMBEDDED { Register scratch, LEnvironment* environment); - struct JumpTableEntry { - inline JumpTableEntry(Address entry, bool frame, bool is_lazy) - : label(), - address(entry), - needs_frame(frame), - is_lazy_deopt(is_lazy) { } - Label label; - Address address; - bool needs_frame; - bool is_lazy_deopt; - }; - void EnsureSpaceForLazyDeopt(); void DoLoadKeyedExternalArray(LLoadKeyed* instr); void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); @@ -416,7 +408,7 @@ class LCodeGen BASE_EMBEDDED { int current_instruction_; const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; - ZoneList<JumpTableEntry> deopt_jump_table_; + ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; ZoneList<Handle<Map> > prototype_maps_; ZoneList<Handle<Map> > transition_maps_; diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 6e0b4a704..a3b21a2bd 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -495,9 +495,7 @@ void MacroAssembler::RecordWrite(Register object, Label done; if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); - tst(value, Operand(kSmiTagMask)); - b(eq, &done); + JumpIfSmi(value, &done); } CheckPageFlag(value, @@ -978,7 +976,7 @@ void MacroAssembler::InitializeNewString(Register string, Heap::RootListIndex map_index, Register scratch1, Register scratch2) { - mov(scratch1, Operand(length, LSL, kSmiTagSize)); + SmiTag(scratch1, length); LoadRoot(scratch2, map_index); str(scratch1, FieldMemOperand(string, String::kLengthOffset)); mov(scratch1, Operand(String::kEmptyHashField)); @@ -1221,7 +1219,7 @@ void MacroAssembler::InvokeFunction(Register fun, ldr(expected_reg, FieldMemOperand(code_reg, SharedFunctionInfo::kFormalParameterCountOffset)); - mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); + SmiUntag(expected_reg); ldr(code_reg, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); @@ -1359,7 +1357,7 @@ void MacroAssembler::JumpToHandlerEntry() { mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. - add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump. + add(pc, r1, Operand::SmiUntag(r2)); // Jump } @@ -1575,7 +1573,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, // Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); - mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int + SmiUntag(t1); sub(t1, t1, Operand(1)); // Generate an unrolled loop that performs a few probes before giving up. @@ -2095,14 +2093,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, b(&store); bind(&smi_value); - Register untagged_value = scratch1; - SmiUntag(untagged_value, value_reg); - vmov(s2, untagged_value); - vcvt_f64_s32(d0, s2); + SmiToDouble(d0, value_reg); bind(&store); - add(scratch1, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); vstr(d0, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); } @@ -2268,7 +2262,9 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, - int stack_space) { + int stack_space, + bool returns_handle, + int return_value_offset) { ExternalReference next_address = ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; @@ -2314,13 +2310,20 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, Label promote_scheduled_exception; Label delete_allocated_handles; Label leave_exit_frame; - - // If result is non-zero, dereference to get the result value - // otherwise set it to undefined. - cmp(r0, Operand::Zero()); - LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - ldr(r0, MemOperand(r0), ne); - + Label return_value_loaded; + + if (returns_handle) { + Label load_return_value; + cmp(r0, Operand::Zero()); + b(eq, &load_return_value); + // dereference returned value + ldr(r0, MemOperand(r0)); + b(&return_value_loaded); + bind(&load_return_value); + } + // load value from ReturnValue + ldr(r0, MemOperand(fp, return_value_offset*kPointerSize)); + bind(&return_value_loaded); // No more valid handles (the result handle was the last one). Restore // previous handle scope. str(r4, MemOperand(r7, kNextOffset)); @@ -2390,70 +2393,21 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) { (1 << String::kArrayIndexValueBits)); // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in // the low kHashShift bits. - STATIC_ASSERT(kSmiTag == 0); Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - mov(index, Operand(hash, LSL, kSmiTagSize)); -} - - -void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, - Register outHighReg, - Register outLowReg) { - // ARMv7 VFP3 instructions to implement integer to double conversion. - mov(r7, Operand(inReg, ASR, kSmiTagSize)); - vmov(s15, r7); - vcvt_f64_s32(d7, s15); - vmov(outLowReg, outHighReg, d7); -} - - -void MacroAssembler::ObjectToDoubleVFPRegister(Register object, - DwVfpRegister result, - Register scratch1, - Register scratch2, - Register heap_number_map, - SwVfpRegister scratch3, - Label* not_number, - ObjectToDoubleFlags flags) { - Label done; - if ((flags & OBJECT_NOT_SMI) == 0) { - Label not_smi; - JumpIfNotSmi(object, &not_smi); - // Remove smi tag and convert to double. - mov(scratch1, Operand(object, ASR, kSmiTagSize)); - vmov(scratch3, scratch1); - vcvt_f64_s32(result, scratch3); - b(&done); - bind(&not_smi); - } - // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); - sub(scratch2, object, Operand(kHeapObjectTag)); - cmp(scratch1, heap_number_map); - b(ne, not_number); - if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { - // If exponent is all ones the number is either a NaN or +/-Infinity. - ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); - Sbfx(scratch1, - scratch1, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // All-one value sign extend to -1. - cmp(scratch1, Operand(-1)); - b(eq, not_number); - } - vldr(result, scratch2, HeapNumber::kValueOffset); - bind(&done); + SmiTag(index, hash); } -void MacroAssembler::SmiToDoubleVFPRegister(Register smi, - DwVfpRegister value, - Register scratch1, - SwVfpRegister scratch2) { - mov(scratch1, Operand(smi, ASR, kSmiTagSize)); - vmov(scratch2, scratch1); - vcvt_f64_s32(value, scratch2); +void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) { + ASSERT(value.code() < 16); + if (CpuFeatures::IsSupported(VFP3)) { + vmov(value.low(), smi); + vcvt_f64_s32(value, 1); + } else { + SmiUntag(ip, smi); + vmov(value.low(), ip); + vcvt_f64_s32(value, value.low()); + } } @@ -2610,7 +2564,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst, if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { ubfx(dst, src, kSmiTagSize, num_least_bits); } else { - mov(dst, Operand(src, ASR, kSmiTagSize)); + SmiUntag(dst, src); and_(dst, dst, Operand((1 << num_least_bits) - 1)); } } @@ -3005,7 +2959,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1, void MacroAssembler::UntagAndJumpIfSmi( Register dst, Register src, Label* smi_case) { STATIC_ASSERT(kSmiTag == 0); - mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); + SmiUntag(dst, src, SetCC); b(cc, smi_case); // Shifter carry is not set for a smi. } @@ -3013,7 +2967,7 @@ void MacroAssembler::UntagAndJumpIfSmi( void MacroAssembler::UntagAndJumpIfNotSmi( Register dst, Register src, Label* non_smi_case) { STATIC_ASSERT(kSmiTag == 0); - mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); + SmiUntag(dst, src, SetCC); b(cs, non_smi_case); // Shifter carry is set for a non-smi. } @@ -3120,7 +3074,6 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, Register scratch2, Label* failure) { // Check that neither is a smi. - STATIC_ASSERT(kSmiTag == 0); and_(scratch1, first, Operand(second)); JumpIfSmi(scratch1, failure); JumpIfNonSmisNotBothSequentialAsciiStrings(first, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 90272911c..50f53b316 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -44,12 +44,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) { } -inline Operand SmiUntagOperand(Register object) { - return Operand(object, ASR, kSmiTagSize); -} - - - // Give alias names to registers const Register cp = { 8 }; // JavaScript context pointer const Register kRootRegister = { 10 }; // Roots array pointer. @@ -62,16 +56,6 @@ enum TaggingMode { DONT_TAG_RESULT }; -// Flags used for the ObjectToDoubleVFPRegister function. -enum ObjectToDoubleFlags { - // No special flags. - NO_OBJECT_TO_DOUBLE_FLAGS = 0, - // Object is known to be a non smi. - OBJECT_NOT_SMI = 1 << 0, - // Don't load NaNs or infinities, branch to the non number case instead. 
- AVOID_NANS_AND_INFINITIES = 1 << 1 -}; - enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; @@ -974,31 +958,9 @@ class MacroAssembler: public Assembler { void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); - // Uses VFP instructions to Convert a Smi to a double. - void IntegerToDoubleConversionWithVFP3(Register inReg, - Register outHighReg, - Register outLowReg); - - // Load the value of a number object into a VFP double register. If the object - // is not a number a jump to the label not_number is performed and the VFP - // double register is unchanged. - void ObjectToDoubleVFPRegister( - Register object, - DwVfpRegister value, - Register scratch1, - Register scratch2, - Register heap_number_map, - SwVfpRegister scratch3, - Label* not_number, - ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS); - - // Load the value of a smi object into a VFP double register. The register - // scratch1 can be the same register as smi in which case smi will hold the - // untagged value afterwards. - void SmiToDoubleVFPRegister(Register smi, - DwVfpRegister value, - Register scratch1, - SwVfpRegister scratch2); + // Load the value of a smi object into a double register. + // The register value must be between d0 and d15. + void SmiToDouble(DwVfpRegister value, Register smi); // Check if a double can be exactly represented as a signed 32-bit integer. // Z flag set to one if true. @@ -1125,7 +1087,10 @@ class MacroAssembler: public Assembler { // from handle and propagates exceptions. Restores context. stack_space // - space to be unwound on exit (includes the call JS arguments space and // the additional space allocated for the fast call). - void CallApiFunctionAndReturn(ExternalReference function, int stack_space); + void CallApiFunctionAndReturn(ExternalReference function, + int stack_space, + bool returns_handle, + int return_value_offset_from_fp); // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& builtin); @@ -1228,18 +1193,21 @@ class MacroAssembler: public Assembler { // Try to convert int32 to smi. If the value is to large, preserve // the original value and jump to not_a_smi. Destroys scratch and // sets flags. - void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) { - mov(scratch, reg); - SmiTag(scratch, SetCC); + void TrySmiTag(Register reg, Label* not_a_smi) { + TrySmiTag(reg, reg, not_a_smi); + } + void TrySmiTag(Register reg, Register src, Label* not_a_smi) { + SmiTag(ip, src, SetCC); b(vs, not_a_smi); - mov(reg, scratch); + mov(reg, ip); } + void SmiUntag(Register reg, SBit s = LeaveCC) { - mov(reg, Operand(reg, ASR, kSmiTagSize), s); + mov(reg, Operand::SmiUntag(reg), s); } void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { - mov(dst, Operand(src, ASR, kSmiTagSize), s); + mov(dst, Operand::SmiUntag(src), s); } // Untag the source value into destination and jump if source is a smi. @@ -1250,6 +1218,13 @@ class MacroAssembler: public Assembler { // Souce and destination can be the same register. void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); + // Test if the register contains a smi (Z == 0 (eq) if true). + inline void SmiTst(Register value) { + tst(value, Operand(kSmiTagMask)); + } + inline void NonNegativeSmiTst(Register value) { + tst(value, Operand(kSmiTagMask | kSmiSignMask)); + } // Jump if the register contains a smi. 
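The TrySmiTag overloads added above branch to not_a_smi on the V flag: tagging is a doubling, so it overflows exactly when the value needs more than 31 bits. A rough standalone model of that check (illustrative only, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Returns false where the assembler version would take the vs branch.
    bool TrySmiTag(int32_t value, int32_t* tagged_out) {
      if (value > INT32_MAX / 2 || value < INT32_MIN / 2) return false;
      *tagged_out = value * 2;  // SmiTag: shift left by one tag bit
      return true;
    }

    int main() {
      int32_t tagged = 0;
      assert(TrySmiTag(42, &tagged) && tagged == 84);
      assert(!TrySmiTag(1 << 30, &tagged));  // 2^30 is one past the smi range
      return 0;
    }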
inline void JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index af65bc70b..c9db167b0 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -412,7 +412,7 @@ void ArmDebugger::Debug() { HeapObject* obj = reinterpret_cast<HeapObject*>(*cur); int value = *cur; Heap* current_heap = v8::internal::Isolate::Current()->heap(); - if (current_heap->Contains(obj) || ((value & 1) == 0)) { + if (((value & 1) == 0) || current_heap->Contains(obj)) { PrintF(" ("); if ((value & 1) == 0) { PrintF("smi %d", value / 2); @@ -1628,10 +1628,13 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0); // This signature supports direct call in to API function native callback // (refer to InvocationCallback in v8.h). typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0); +typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0); // This signature supports direct call to accessor getter callback. typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1); +typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0, + int32_t arg1); // Software interrupt instructions are used by the simulator to call into the // C-based V8 runtime. @@ -1770,40 +1773,56 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { break; } } - } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { - SimulatorRuntimeDirectApiCall target = - reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); + } else if ( + redirection->type() == ExternalReference::DIRECT_API_CALL || + redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) { if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF("Call to host function at %p args %08x", - FUNCTION_ADDR(target), arg0); + reinterpret_cast<void*>(external), arg0); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } PrintF("\n"); } CHECK(stack_aligned); - v8::Handle<v8::Value> result = target(arg0); - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %p\n", reinterpret_cast<void*>(*result)); + if (redirection->type() == ExternalReference::DIRECT_API_CALL) { + SimulatorRuntimeDirectApiCall target = + reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); + v8::Handle<v8::Value> result = target(arg0); + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %p\n", reinterpret_cast<void*>(*result)); + } + set_register(r0, reinterpret_cast<int32_t>(*result)); + } else { + SimulatorRuntimeDirectApiCallNew target = + reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external); + target(arg0); } - set_register(r0, reinterpret_cast<int32_t>(*result)); - } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { - SimulatorRuntimeDirectGetterCall target = - reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); + } else if ( + redirection->type() == ExternalReference::DIRECT_GETTER_CALL || + redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) { if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF("Call to host function at %p args %08x %08x", - FUNCTION_ADDR(target), arg0, arg1); + reinterpret_cast<void*>(external), arg0, arg1); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } PrintF("\n"); } CHECK(stack_aligned); - v8::Handle<v8::Value> result = target(arg0, arg1); - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %p\n", reinterpret_cast<void*>(*result)); + if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); + v8::Handle<v8::Value> result = target(arg0, arg1); + if
(::v8::internal::FLAG_trace_sim) { + PrintF("Returned %p\n", reinterpret_cast<void*>(*result)); + } + set_register(r0, reinterpret_cast<int32_t>(*result)); + } else { + SimulatorRuntimeDirectGetterCallNew target = + reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external); + target(arg0, arg1); } - set_register(r0, reinterpret_cast<int32_t>(*result)); } else { // builtin call. ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); @@ -2698,6 +2717,7 @@ void Simulator::DecodeType7(Instruction* instr) { // vmov :Rt = Sn // vcvt: Dd = Sm // vcvt: Sd = Dm +// vcvt.f64.s32 Dd, Dd, #<fbits> // Dd = vabs(Dm) // Dd = vneg(Dm) // Dd = vadd(Dn, Dm) @@ -2746,6 +2766,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { DecodeVCVTBetweenDoubleAndSingle(instr); } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) { DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) && + (instr->Bit(8) == 1)) { + // vcvt.f64.s32 Dd, Dd, #<fbits> + int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0)); + int fixed_value = get_sinteger_from_s_register(vd * 2); + double divide = 1 << fraction_bits; + set_d_register_from_double(vd, fixed_value / divide); } else if (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)) { DecodeVCVTBetweenFloatingPointAndInteger(instr); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 127bf3fdd..b0de01451 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -516,6 +516,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, if (FLAG_track_fields && representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_restore_name); + } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { + __ JumpIfSmi(value_reg, miss_restore_name); } else if (FLAG_track_double_fields && representation.IsDouble()) { Label do_store, heap_number; __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); @@ -685,6 +687,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, ASSERT(!representation.IsNone()); if (FLAG_track_fields && representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_label); + } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { + __ JumpIfSmi(value_reg, miss_label); } else if (FLAG_track_double_fields && representation.IsDouble()) { // Load the double storage. if (index < 0) { @@ -848,7 +852,7 @@ static void CompileCallLoadPropertyWithInterceptor( } -static const int kFastApiCallArguments = 4; +static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength; // Reserves space for the extra arguments to API function in the // caller's frame. @@ -877,10 +881,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // -- sp[4] : callee JS function // -- sp[8] : call data // -- sp[12] : isolate - // -- sp[16] : last JS argument + // -- sp[16] : ReturnValue + // -- sp[20] : last JS argument // -- ... - // -- sp[(argc + 3) * 4] : first JS argument - // -- sp[(argc + 4) * 4] : receiver + // -- sp[(argc + 4) * 4] : first JS argument + // -- sp[(argc + 5) * 4] : receiver // ----------------------------------- // Get the function and setup the context. Handle<JSFunction> function = optimization.constant_function(); @@ -897,11 +902,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ Move(r6, call_data); } __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); - // Store JS function, call data and isolate. + // Store JS function, call data, isolate and ReturnValue.
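// Offset sketch for the stores below (matching the frame diagram above):
// stm(ib) based at sp writes r5..r7 to sp[4], sp[8] and sp[12], i.e. the
// callee, call data and isolate slots, and the new ReturnValue slot at
// sp[16] == MemOperand(sp, 4 * kPointerSize) is then seeded with undefined,
// so a callback that never writes it still returns undefined: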
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ str(r5, MemOperand(sp, 4 * kPointerSize)); // Prepare arguments. - __ add(r2, sp, Operand(3 * kPointerSize)); + __ add(r2, sp, Operand(4 * kPointerSize)); // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. @@ -927,13 +934,21 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; Address function_address = v8::ToCData<Address>
(api_call_info->callback()); + bool returns_handle = + !CallbackTable::ReturnsVoid(masm->isolate(), function_address); ApiFunction fun(function_address); + ExternalReference::Type type = + returns_handle ? + ExternalReference::DIRECT_API_CALL : + ExternalReference::DIRECT_API_CALL_NEW; ExternalReference ref = ExternalReference(&fun, - ExternalReference::DIRECT_API_CALL, + type, masm->isolate()); AllowExternalCallThatCantCauseGC scope(masm); - - __ CallApiFunctionAndReturn(ref, kStackUnwindSpace); + __ CallApiFunctionAndReturn(ref, + kStackUnwindSpace, + returns_handle, + kFastApiCallArguments + 1); } @@ -1409,7 +1424,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ Push(reg, scratch3()); __ mov(scratch3(), Operand(ExternalReference::isolate_address(isolate()))); - __ Push(scratch3(), name()); + __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex); + __ Push(scratch3(), scratch4(), name()); __ mov(r0, sp); // r0 = Handle<Name> const int kApiStackSpace = 1; @@ -1421,12 +1437,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ str(scratch2(), MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - const int kStackUnwindSpace = 5; + const int kStackUnwindSpace = kFastApiCallArguments + 1; Address getter_address = v8::ToCData<Address>
(callback->getter()); + bool returns_handle = + !CallbackTable::ReturnsVoid(isolate(), getter_address); ApiFunction fun(getter_address); - ExternalReference ref = ExternalReference( - &fun, ExternalReference::DIRECT_GETTER_CALL, isolate()); - __ CallApiFunctionAndReturn(ref, kStackUnwindSpace); + ExternalReference::Type type = + returns_handle ? + ExternalReference::DIRECT_GETTER_CALL : + ExternalReference::DIRECT_GETTER_CALL_NEW; + + ExternalReference ref = ExternalReference(&fun, type, isolate()); + __ CallApiFunctionAndReturn(ref, + kStackUnwindSpace, + returns_handle, + 3); } @@ -1676,8 +1701,6 @@ Handle CallStubCompiler::CompileArrayPushCall( // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); __ add(r0, r0, Operand(Smi::FromInt(argc))); // Get the elements' length. @@ -1697,8 +1720,7 @@ Handle CallStubCompiler::CompileArrayPushCall( // Store the value. // We may need a register containing the address end_elements below, // so write back the value in end_elements. - __ add(end_elements, elements, - Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); const int kEndElementsOffset = FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); @@ -1718,8 +1740,6 @@ Handle CallStubCompiler::CompileArrayPushCall( // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); __ add(r0, r0, Operand(Smi::FromInt(argc))); // Get the elements' length. @@ -1793,8 +1813,7 @@ Handle CallStubCompiler::CompileArrayPushCall( // Store the value. // We may need a register containing the address end_elements below, // so write back the value in end_elements. - __ add(end_elements, elements, - Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); __ RecordWrite(elements, @@ -1831,8 +1850,7 @@ Handle CallStubCompiler::CompileArrayPushCall( const int kAllocationDelta = 4; // Load top and check if it is the end of elements. - __ add(end_elements, elements, - Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); __ add(end_elements, end_elements, Operand(kEndElementsOffset)); __ mov(r7, Operand(new_space_allocation_top)); __ ldr(r3, MemOperand(r7)); @@ -1928,11 +1946,9 @@ Handle CallStubCompiler::CompileArrayPopCall( // Get the last element. __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); // We can't address the last element in one operation. Compute the more // expensive shift first, and use an offset later on. 
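// The rewrites below lean on an identity worth spelling out (a sketch,
// assuming 32-bit smis with a one-bit tag): for a smi key k,
//
//   Operand::PointerOffsetFromSmiKey(k)
//       == Operand(k, LSL, kPointerSizeLog2 - kSmiTagSize)
//
// i.e. both turn the tagged key directly into a byte offset scaled by
// kPointerSize, with no separate untagging step: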
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4)); __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); __ cmp(r0, r6); __ b(eq, &call_builtin); @@ -2154,7 +2170,6 @@ Handle CallStubCompiler::CompileStringFromCharCodeCall( if (cell.is_null()) { __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(r1, &miss); CheckPrototypes(Handle::cast(object), r1, holder, r0, r3, r4, @@ -2172,7 +2187,6 @@ Handle CallStubCompiler::CompileStringFromCharCodeCall( // Check the code is a smi. Label slow; - STATIC_ASSERT(kSmiTag == 0); __ JumpIfNotSmi(code, &slow); // Convert the smi code to uint16. @@ -2226,7 +2240,6 @@ Handle CallStubCompiler::CompileMathFloorCall( if (cell.is_null()) { __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(r1, &miss); CheckPrototypes(Handle::cast(object), r1, holder, r0, r3, r4, name, &miss); @@ -2241,8 +2254,7 @@ Handle CallStubCompiler::CompileMathFloorCall( __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); // If the argument is a smi, just return. - STATIC_ASSERT(kSmiTag == 0); - __ tst(r0, Operand(kSmiTagMask)); + __ SmiTst(r0); __ Drop(argc + 1, eq); __ Ret(eq); @@ -2288,11 +2300,9 @@ Handle CallStubCompiler::CompileMathFloorCall( __ bind(&smi_check); // Check if the result can fit into an smi. If we had an overflow, // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi. - __ add(r1, r0, Operand(0x40000000), SetCC); // If result doesn't fit into an smi, branch to slow. - __ b(&slow, mi); - // Tag the result. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ SmiTag(r0, SetCC); + __ b(vs, &slow); __ bind(&just_return); __ Drop(argc + 1); @@ -2337,7 +2347,6 @@ Handle CallStubCompiler::CompileMathAbsCall( GenerateNameCheck(name, &miss); if (cell.is_null()) { __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(r1, &miss); CheckPrototypes(Handle::cast(object), r1, holder, r0, r3, r4, name, &miss); @@ -2353,7 +2362,6 @@ Handle CallStubCompiler::CompileMathAbsCall( // Check if the argument is a smi. Label not_smi; - STATIC_ASSERT(kSmiTag == 0); __ JumpIfNotSmi(r0, ¬_smi); // Do bitwise not or do nothing depending on the sign of the @@ -3233,8 +3241,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( Register key = r0; Register receiver = r1; - __ JumpIfNotSmi(key, &miss_force_generic); - __ mov(r2, Operand(key, ASR, kSmiTagSize)); + __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic); __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); __ Ret(); @@ -3266,7 +3273,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( static void GenerateSmiKeyCheck(MacroAssembler* masm, Register key, Register scratch0, - Register scratch1, DwVfpRegister double_scratch0, DwVfpRegister double_scratch1, Label* fail) { @@ -3284,8 +3290,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); __ b(ne, fail); - __ TrySmiTag(scratch0, fail, scratch1); - __ mov(key, scratch0); + __ TrySmiTag(key, scratch0, fail); __ bind(&key_ok); } @@ -3311,7 +3316,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. 
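// One scratch register drops out of the helper call below: TrySmiTag now
// tags through the assembler's ip scratch (see macro-assembler-arm.h
// above), so a sketch of the new signature, as defined later in this file,
// is
//
//   GenerateSmiKeyCheck(masm, key, scratch0,
//                       double_scratch0, double_scratch1, fail);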
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -3326,11 +3331,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // r3: external array. if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { // Double to pixel conversion is only implemented in the runtime for now. - __ JumpIfNotSmi(value, &slow); + __ UntagAndJumpIfNotSmi(r5, value, &slow); } else { - __ JumpIfNotSmi(value, &check_heap_number); + __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number); } - __ SmiUntag(r5, value); __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); // r3: base pointer of external storage. @@ -3501,7 +3505,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic); if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); @@ -3535,20 +3539,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg)); __ str(value_reg, MemOperand(scratch)); } else { ASSERT(IsFastObjectElementsKind(elements_kind)); __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg)); __ str(value_reg, MemOperand(scratch)); __ mov(receiver_reg, value_reg); __ RecordWrite(elements_reg, // Object. @@ -3662,7 +3660,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 54f0b486e..599fd5cfe 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -416,6 +416,26 @@ function ArrayPop() { } +function ObservedArrayPush() { + var n = TO_UINT32(this.length); + var m = %_ArgumentsLength(); + + EnqueueSpliceRecord(this, n, [], 0, m); + + try { + BeginPerformSplice(this); + + for (var i = 0; i < m; i++) { + this[i+n] = %_Arguments(i); + } + this.length = n + m; + } finally { + EndPerformSplice(this); + } + + return this.length; +} + // Appends the arguments to the end of the array and returns the new // length of the array. See ECMA-262, section 15.4.4.7. 
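// A usage sketch of the observed path above (the callback is hypothetical;
// the record shape follows what EnqueueSpliceRecord produces):
//
//   var a = [1];
//   Object.observe(a, function(records) {
//     // records[0] is roughly { object: a, type: 'splice',
//     //                         index: 1, removed: [], addedCount: 2 }
//   });
//   a.push(2, 3);  // dispatched to ObservedArrayPush
//
// The unobserved fast path keeps its original shape: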
function ArrayPush() { @@ -424,6 +444,9 @@ function ArrayPush() { ["Array.prototype.push"]); } + if (%IsObserved(this)) + return ObservedArrayPush.apply(this, arguments); + var n = TO_UINT32(this.length); var m = %_ArgumentsLength(); for (var i = 0; i < m; i++) { diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 6abd5c55d..2d9e727e5 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -647,9 +647,17 @@ class ExternalReference BASE_EMBEDDED { // Handle f(v8::Arguments&) DIRECT_API_CALL, + // Direct call to API function callback. + // void f(v8::Arguments&) + DIRECT_API_CALL_NEW, + // Direct call to accessor getter callback. // Handle f(Local property, AccessorInfo& info) - DIRECT_GETTER_CALL + DIRECT_GETTER_CALL, + + // Direct call to accessor getter callback. + // void f(Local property, AccessorInfo& info) + DIRECT_GETTER_CALL_NEW }; static void SetUp(); diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 9ffb00db0..ad7b11985 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -278,7 +278,9 @@ class SmallMapList { int length() const { return list_.length(); } void AddMapIfMissing(Handle map, Zone* zone) { - map = Map::CurrentMapForDeprecated(map); + Map* updated = map->CurrentMapForDeprecated(); + if (updated == NULL) return; + map = Handle(updated); for (int i = 0; i < length(); ++i) { if (at(i).is_identical_to(map)) return; } @@ -286,6 +288,7 @@ class SmallMapList { } void Add(Handle handle, Zone* zone) { + ASSERT(!handle->is_deprecated()); list_.Add(handle.location(), zone); } @@ -1992,6 +1995,18 @@ class Yield: public Expression { Kind yield_kind() const { return yield_kind_; } virtual int position() const { return pos_; } + // Delegating yield surrounds the "yield" in a "try/catch". This index + // locates the catch handler in the handler table, and is equivalent to + // TryCatchStatement::index(). + int index() const { + ASSERT(yield_kind() == DELEGATING); + return index_; + } + void set_index(int index) { + ASSERT(yield_kind() == DELEGATING); + index_ = index; + } + protected: Yield(Isolate* isolate, Expression* generator_object, @@ -2002,12 +2017,14 @@ class Yield: public Expression { generator_object_(generator_object), expression_(expression), yield_kind_(yield_kind), + index_(-1), pos_(pos) { } private: Expression* generator_object_; Expression* expression_; Kind yield_kind_; + int index_; int pos_; }; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index b0d3a5e50..7c9e4366e 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -95,6 +95,10 @@ Handle Bootstrapper::NativesSourceLookup(int index) { void Bootstrapper::Initialize(bool create_heap_objects) { extensions_cache_.Initialize(create_heap_objects); +} + + +void Bootstrapper::InitializeOncePerProcess() { GCExtension::Register(); ExternalizeStringExtension::Register(); StatisticsExtension::Register(); @@ -201,7 +205,8 @@ class Genesis BASE_EMBEDDED { ElementsKind elements_kind); bool InstallNatives(); - Handle InstallTypedArray(const char* name); + Handle InstallTypedArray(const char* name, + ElementsKind elementsKind); bool InstallExperimentalNatives(); void InstallBuiltinFunctionIds(); void InstallJSFunctionResultCaches(); @@ -281,12 +286,12 @@ class Genesis BASE_EMBEDDED { Handle result_; Handle native_context_; - // Function instance maps. Function literal maps are created initially with - // a read only prototype for the processing of JS builtins. 
Later the function - // instance maps are replaced in order to make prototype writable. - // These are the final, writable prototype, maps. - Handle function_instance_map_writable_prototype_; - Handle strict_mode_function_instance_map_writable_prototype_; + // Function maps. Function maps are created initially with a read only + // prototype for the processing of JS builtins. Later the function maps are + // replaced in order to make prototype writable. These are the final, writable + // prototype, maps. + Handle function_map_writable_prototype_; + Handle strict_mode_function_map_writable_prototype_; Handle throw_type_error_function; BootstrapperActive active_; @@ -349,7 +354,8 @@ static Handle InstallFunction(Handle target, int instance_size, Handle prototype, Builtins::Name call, - bool is_ecma_native) { + bool install_initial_map, + bool set_instance_class_name) { Isolate* isolate = target->GetIsolate(); Factory* factory = isolate->factory(); Handle internalized_name = factory->InternalizeUtf8String(name); @@ -361,7 +367,7 @@ static Handle InstallFunction(Handle target, instance_size, prototype, call_code, - is_ecma_native); + install_initial_map); PropertyAttributes attributes; if (target->IsJSBuiltinsObject()) { attributes = @@ -372,7 +378,7 @@ static Handle InstallFunction(Handle target, CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( target, internalized_name, function, attributes)); - if (is_ecma_native) { + if (set_instance_class_name) { function->shared()->set_instance_class_name(*internalized_name); } function->shared()->set_native(true); @@ -437,12 +443,6 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { // Allocate the map for function instances. Maps are allocated first and their // prototypes patched later, once empty function is created. - // Please note that the prototype property for function instances must be - // writable. - Handle function_instance_map = - CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE); - native_context()->set_function_instance_map(*function_instance_map); - // Functions with this map will not have a 'prototype' property, and // can not be used as constructors. Handle function_without_prototype_map = @@ -458,13 +458,11 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { // The final map for functions. Writeable prototype. // This map is installed in MakeFunctionInstancePrototypeWritable. - function_instance_map_writable_prototype_ = - CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE); + function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE); Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); - Handle object_name = Handle(heap->Object_string()); + Handle object_name = factory->Object_string(); { // --- O b j e c t --- Handle object_fun = @@ -482,6 +480,10 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { TENURED); native_context()->set_initial_object_prototype(*prototype); + // For bootstrapping set the array prototype to be the same as the object + // prototype, otherwise the missing initial_array_prototype will cause + // assertions during startup. + native_context()->set_initial_array_prototype(*prototype); SetPrototype(object_fun, prototype); } @@ -509,10 +511,9 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { // Set prototypes for the function maps. 
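// (With the *_instance_map pair deleted above, only the two surviving
// writable-prototype function maps still need the empty function wired in
// as their prototype:)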
native_context()->function_map()->set_prototype(*empty_function); - native_context()->function_instance_map()->set_prototype(*empty_function); native_context()->function_without_prototype_map()-> set_prototype(*empty_function); - function_instance_map_writable_prototype_->set_prototype(*empty_function); + function_map_writable_prototype_->set_prototype(*empty_function); // Allocate the function map first and then patch the prototype later Handle empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE); @@ -601,12 +602,6 @@ Handle Genesis::CreateStrictModeFunctionMap( void Genesis::CreateStrictModeFunctionMaps(Handle empty) { - // Allocate map for the strict mode function instances. - Handle strict_mode_function_instance_map = - CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty); - native_context()->set_strict_mode_function_instance_map( - *strict_mode_function_instance_map); - // Allocate map for the prototype-less strict mode instances. Handle strict_mode_function_without_prototype_map = CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty); @@ -623,15 +618,13 @@ void Genesis::CreateStrictModeFunctionMaps(Handle empty) { // The final map for the strict mode functions. Writeable prototype. // This map is installed in MakeFunctionInstancePrototypeWritable. - strict_mode_function_instance_map_writable_prototype_ = + strict_mode_function_map_writable_prototype_ = CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty); // Complete the callbacks. - PoisonArgumentsAndCaller(strict_mode_function_instance_map); PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map); PoisonArgumentsAndCaller(strict_mode_function_map); - PoisonArgumentsAndCaller( - strict_mode_function_instance_map_writable_prototype_); + PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_); } @@ -846,7 +839,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); - Handle object_name = Handle(heap->Object_string()); + Handle object_name = factory->Object_string(); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( inner_global, object_name, @@ -856,13 +849,13 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Install global Function object InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize, - empty_function, Builtins::kIllegal, true); // ECMA native. 
+ empty_function, Builtins::kIllegal, true, true); { // --- A r r a y --- Handle array_function = InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize, isolate->initial_object_prototype(), - Builtins::kArrayCode, true); + Builtins::kArrayCode, true, true); array_function->shared()->DontAdaptArguments(); // This seems a bit hackish, but we need to make sure Array.length @@ -906,7 +899,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle number_fun = InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_number_function(*number_fun); } @@ -914,7 +907,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle boolean_fun = InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_boolean_function(*boolean_fun); } @@ -922,7 +915,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle string_fun = InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); string_fun->shared()->set_construct_stub( isolate->builtins()->builtin(Builtins::kStringConstructCode)); native_context()->set_string_function(*string_fun); @@ -950,7 +943,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle date_fun = InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_date_function(*date_fun); } @@ -961,7 +954,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle regexp_fun = InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_regexp_function(*regexp_fun); ASSERT(regexp_fun->has_initial_map()); @@ -1276,11 +1269,18 @@ bool Genesis::InitializeGlobal(Handle inner_global, } -Handle Genesis::InstallTypedArray(const char* name) { +Handle Genesis::InstallTypedArray( + const char* name, ElementsKind elementsKind) { Handle global = Handle(native_context()->global_object()); - return InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, + Handle result = InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, false, true); + + Handle initial_map = isolate()->factory()->NewMap( + JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, elementsKind); + result->set_initial_map(*initial_map); + initial_map->set_constructor(*result); + return result; } @@ -1295,7 +1295,7 @@ void Genesis::InitializeExperimentalGlobal() { Handle symbol_fun = InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_symbol_function(*symbol_fun); } @@ -1303,17 +1303,17 @@ void Genesis::InitializeExperimentalGlobal() { { // -- S e t InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); } { // -- M a p InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, 
true); } { // -- W e a k M a p InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); } } @@ -1323,29 +1323,38 @@ void Genesis::InitializeExperimentalGlobal() { InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE, JSArrayBuffer::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Builtins::kIllegal, true, true); native_context()->set_array_buffer_fun(*array_buffer_fun); } if (FLAG_harmony_typed_arrays) { // -- T y p e d A r r a y s - Handle int8_fun = InstallTypedArray("Int8Array"); + Handle int8_fun = InstallTypedArray("Int8Array", + EXTERNAL_BYTE_ELEMENTS); native_context()->set_int8_array_fun(*int8_fun); - Handle uint8_fun = InstallTypedArray("Uint8Array"); + Handle uint8_fun = InstallTypedArray("Uint8Array", + EXTERNAL_UNSIGNED_BYTE_ELEMENTS); native_context()->set_uint8_array_fun(*uint8_fun); - Handle int16_fun = InstallTypedArray("Int16Array"); + Handle int16_fun = InstallTypedArray("Int16Array", + EXTERNAL_SHORT_ELEMENTS); native_context()->set_int16_array_fun(*int16_fun); - Handle uint16_fun = InstallTypedArray("Uint16Array"); + Handle uint16_fun = InstallTypedArray("Uint16Array", + EXTERNAL_UNSIGNED_SHORT_ELEMENTS); native_context()->set_uint16_array_fun(*uint16_fun); - Handle int32_fun = InstallTypedArray("Int32Array"); + Handle int32_fun = InstallTypedArray("Int32Array", + EXTERNAL_INT_ELEMENTS); native_context()->set_int32_array_fun(*int32_fun); - Handle uint32_fun = InstallTypedArray("Uint32Array"); + Handle uint32_fun = InstallTypedArray("Uint32Array", + EXTERNAL_UNSIGNED_INT_ELEMENTS); native_context()->set_uint32_array_fun(*uint32_fun); - Handle float_fun = InstallTypedArray("Float32Array"); + Handle float_fun = InstallTypedArray("Float32Array", + EXTERNAL_FLOAT_ELEMENTS); native_context()->set_float_array_fun(*float_fun); - Handle double_fun = InstallTypedArray("Float64Array"); + Handle double_fun = InstallTypedArray("Float64Array", + EXTERNAL_DOUBLE_ELEMENTS); native_context()->set_double_array_fun(*double_fun); - Handle uint8c_fun = InstallTypedArray("Uint8ClampedArray"); + Handle uint8c_fun = InstallTypedArray("Uint8ClampedArray", + EXTERNAL_PIXEL_ELEMENTS); native_context()->set_uint8c_array_fun(*uint8c_fun); } @@ -1358,11 +1367,11 @@ void Genesis::InitializeExperimentalGlobal() { InstallFunction(builtins, "GeneratorFunctionPrototype", JS_FUNCTION_TYPE, JSFunction::kHeaderSize, generator_object_prototype, Builtins::kIllegal, - false); + false, false); InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize, generator_function_prototype, Builtins::kIllegal, - false); + false, false); // Create maps for generator functions and their prototypes. Store those // maps in the native context. 
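// A condensed sketch of the InstallFunction contract after this change: the
// single is_ecma_native flag has become two booleans,
//
//   InstallFunction(target, name, type, instance_size, prototype, call,
//                   install_initial_map, set_instance_class_name);
//
// so the generator helpers above pass false/false, the typed array
// constructors pass false/true (their initial maps are built by hand with
// the right ElementsKind), and the ECMA natives keep true/true.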
@@ -1590,7 +1599,7 @@ Handle Genesis::InstallInternalArray( JSArray::kSize, isolate()->initial_object_prototype(), Builtins::kInternalArrayCode, - true); + true, true); Handle prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(array_function, prototype); @@ -1690,7 +1699,7 @@ bool Genesis::InstallNatives() { Handle script_fun = InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, false); + Builtins::kIllegal, false, false); Handle prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(script_fun, prototype); @@ -1846,7 +1855,7 @@ bool Genesis::InstallNatives() { InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE, JSValue::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, false); + Builtins::kIllegal, false, false); Handle prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(opaque_reference_fun, prototype); @@ -1910,12 +1919,12 @@ bool Genesis::InstallNatives() { InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize, Handle::null(), Builtins::kFunctionCall, - false); + false, false); Handle apply = InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize, Handle::null(), Builtins::kFunctionApply, - false); + false, false); // Make sure that Function.prototype.call appears to be compiled. // The code will never be called, but inline caching for call will @@ -2381,6 +2390,10 @@ bool Genesis::ConfigureGlobalObjects( } SetObjectPrototype(global_proxy, inner_global); + + native_context()->set_initial_array_prototype( + JSArray::cast(native_context()->array_function()->prototype())); + return true; } @@ -2522,14 +2535,13 @@ void Genesis::MakeFunctionInstancePrototypeWritable() { // The maps with writable prototype are created in CreateEmptyFunction // and CreateStrictModeFunctionMaps respectively. Initially the maps are // created with read-only prototype for JS builtins processing. - ASSERT(!function_instance_map_writable_prototype_.is_null()); - ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null()); + ASSERT(!function_map_writable_prototype_.is_null()); + ASSERT(!strict_mode_function_map_writable_prototype_.is_null()); // Replace function instance maps to make prototype writable. - native_context()->set_function_map( - *function_instance_map_writable_prototype_); + native_context()->set_function_map(*function_map_writable_prototype_); native_context()->set_strict_mode_function_map( - *strict_mode_function_instance_map_writable_prototype_); + *strict_mode_function_map_writable_prototype_); } diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index e33415eeb..476ac12e1 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -88,6 +88,8 @@ class SourceCodeCache BASE_EMBEDDED { // context. class Bootstrapper { public: + static void InitializeOncePerProcess(); + // Requires: Heap::SetUp has been called. void Initialize(bool create_heap_objects); void TearDown(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 661ee9437..81b600574 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -845,7 +845,7 @@ BUILTIN(ArraySlice) { if (start < kMinInt || start > kMaxInt) { return CallJsBuiltin(isolate, "ArraySlice", args); } - relative_start = static_cast(start); + relative_start = std::isnan(start) ? 
0 : static_cast<int>(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -858,7 +858,7 @@ BUILTIN(ArraySlice) { if (end < kMinInt || end > kMaxInt) { return CallJsBuiltin(isolate, "ArraySlice", args); } - relative_end = static_cast<int>(end); + relative_end = std::isnan(end) ? 0 : static_cast<int>(end); } else if (!arg2->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -1317,15 +1317,13 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver()))); ASSERT(raw_holder->IsJSObject()); - CustomArguments custom(isolate); - v8::ImplementationUtilities::PrepareArgumentsData(custom.end(), - isolate, data_obj, *function, raw_holder); - - v8::Arguments new_args = v8::ImplementationUtilities::NewArguments( - custom.end(), - &args[0] - 1, - args.length() - 1, - is_construct); + FunctionCallbackArguments custom(isolate, + data_obj, + *function, + raw_holder, + &args[0] - 1, + args.length() - 1, + is_construct); v8::Handle<Value> value; { @@ -1333,7 +1331,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData<Address>
(callback_obj)); - value = callback(new_args); + value = custom.Call(callback); } if (value.IsEmpty()) { result = heap->undefined_value(); @@ -1396,21 +1394,20 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( HandleScope scope(isolate); LOG(isolate, ApiObjectAccess("call non-function", obj)); - CustomArguments custom(isolate); - v8::ImplementationUtilities::PrepareArgumentsData(custom.end(), - isolate, call_data->data(), constructor, obj); - v8::Arguments new_args = v8::ImplementationUtilities::NewArguments( - custom.end(), - &args[0] - 1, - args.length() - 1, - is_construct_call); + FunctionCallbackArguments custom(isolate, + call_data->data(), + constructor, + obj, + &args[0] - 1, + args.length() - 1, + is_construct_call); v8::Handle<Value> value; { // Leaving JavaScript. VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData<Address>
(callback_obj)); - value = callback(new_args); + value = custom.Call(callback); } if (value.IsEmpty()) { result = heap->undefined_value(); diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 6fc17c45c..58d1a8b14 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -107,6 +107,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \ @@ -380,6 +382,7 @@ class Builtins { static void Generate_LazyCompile(MacroAssembler* masm); static void Generate_LazyRecompile(MacroAssembler* masm); static void Generate_NotifyDeoptimized(MacroAssembler* masm); + static void Generate_NotifySoftDeoptimized(MacroAssembler* masm); static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm); static void Generate_NotifyOSR(MacroAssembler* masm); static void Generate_NotifyStubFailure(MacroAssembler* masm); diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index a6405ecde..8bcde1c61 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ -53,7 +53,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { if (fatal_error_handler_nesting_depth < 3) { if (i::FLAG_stack_trace_on_abort) { // Call this one twice on double fault - i::Isolate::Current()->PrintStack(); + i::Isolate::Current()->PrintStack(stderr); } } i::OS::Abort(); diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc index 31431b71c..6e837ddb9 100644 --- a/deps/v8/src/code-stubs-hydrogen.cc +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -418,7 +418,7 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HInstruction* load = BuildUncheckedMonomorphicElementAccess( GetParameter(0), GetParameter(1), NULL, NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), - false, STANDARD_STORE, Representation::Tagged()); + false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged()); return load; } @@ -463,7 +463,8 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { BuildUncheckedMonomorphicElementAccess( GetParameter(0), GetParameter(1), GetParameter(2), NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), - true, casted_stub()->store_mode(), Representation::Tagged()); + true, NEVER_RETURN_HOLE, casted_stub()->store_mode(), + Representation::Tagged()); return GetParameter(2); } diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index aa2c82172..312febc1a 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -232,37 +232,37 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { switch (op_) { case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION); break; case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION); break; case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION); break; case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION); break; case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION); break; case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, 
JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION); break; case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION); break; case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION); break; case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION); break; case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION); break; case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION); break; default: UNREACHABLE(); @@ -408,41 +408,50 @@ void ICCompareStub::Generate(MacroAssembler* masm) { } -CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags( - Code::ExtraICState extra_ic_state, - Handle object, - bool* already_monomorphic) { - Types types = TypesField::decode(extra_ic_state); - NilValue nil = NilValueField::decode(extra_ic_state); - EqualityKind kind = EqualityKindField::decode(extra_ic_state); - ASSERT(types != CompareNilICStub::kFullCompare); - *already_monomorphic = - (types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0; - if (kind == kStrictEquality) { - if (nil == kNullValue) { - return CompareNilICStub::kCompareAgainstNull; - } else { - return CompareNilICStub::kCompareAgainstUndefined; - } +void CompareNilICStub::Record(Handle object) { + ASSERT(types_ != Types::FullCompare()); + if (equality_kind_ == kStrictEquality) { + // When testing for strict equality only one value will evaluate to true + types_.RemoveAll(); + types_.Add((nil_value_ == kNullValue) ? NULL_TYPE: + UNDEFINED); } else { if (object->IsNull()) { - types = static_cast( - types | CompareNilICStub::kCompareAgainstNull); + types_.Add(NULL_TYPE); } else if (object->IsUndefined()) { - types = static_cast( - types | CompareNilICStub::kCompareAgainstUndefined); + types_.Add(UNDEFINED); } else if (object->IsUndetectableObject() || object->IsOddball() || !object->IsHeapObject()) { - types = CompareNilICStub::kFullCompare; - } else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) { - types = CompareNilICStub::kFullCompare; + types_ = Types::FullCompare(); + } else if (IsMonomorphic()) { + types_ = Types::FullCompare(); } else { - types = static_cast( - types | CompareNilICStub::kCompareAgainstMonomorphicMap); + types_.Add(MONOMORPHIC_MAP); } } - return types; +} + + +void CompareNilICStub::PrintName(StringStream* stream) { + stream->Add("CompareNilICStub_"); + types_.Print(stream); + stream->Add((nil_value_ == kNullValue) ? "(NullValue|": + "(UndefinedValue|"); + stream->Add((equality_kind_ == kStrictEquality) ? 
"StrictEquality)": + "NonStrictEquality)"); +} + + +void CompareNilICStub::Types::Print(StringStream* stream) const { + stream->Add("("); + SimpleListPrinter printer(stream); + if (IsEmpty()) printer.Add("None"); + if (Contains(UNDEFINED)) printer.Add("Undefined"); + if (Contains(NULL_TYPE)) printer.Add("Null"); + if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap"); + if (Contains(UNDETECTABLE)) printer.Add("Undetectable"); + stream->Add(")"); } @@ -552,15 +561,18 @@ void ToBooleanStub::PrintName(StringStream* stream) { void ToBooleanStub::Types::Print(StringStream* stream) const { - if (IsEmpty()) stream->Add("None"); - if (Contains(UNDEFINED)) stream->Add("Undefined"); - if (Contains(BOOLEAN)) stream->Add("Bool"); - if (Contains(NULL_TYPE)) stream->Add("Null"); - if (Contains(SMI)) stream->Add("Smi"); - if (Contains(SPEC_OBJECT)) stream->Add("SpecObject"); - if (Contains(STRING)) stream->Add("String"); - if (Contains(SYMBOL)) stream->Add("Symbol"); - if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber"); + stream->Add("("); + SimpleListPrinter printer(stream); + if (IsEmpty()) printer.Add("None"); + if (Contains(UNDEFINED)) printer.Add("Undefined"); + if (Contains(BOOLEAN)) printer.Add("Bool"); + if (Contains(NULL_TYPE)) printer.Add("Null"); + if (Contains(SMI)) printer.Add("Smi"); + if (Contains(SPEC_OBJECT)) printer.Add("SpecObject"); + if (Contains(STRING)) printer.Add("String"); + if (Contains(SYMBOL)) printer.Add("Symbol"); + if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber"); + stream->Add(")"); } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 646aee23e..aa6a41019 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -1047,41 +1047,52 @@ class ICCompareStub: public PlatformCodeStub { class CompareNilICStub : public HydrogenCodeStub { public: - enum Types { - kCompareAgainstNull = 1 << 0, - kCompareAgainstUndefined = 1 << 1, - kCompareAgainstMonomorphicMap = 1 << 2, - kCompareAgainstUndetectable = 1 << 3, - kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined | - kCompareAgainstUndetectable + enum Type { + UNDEFINED, + NULL_TYPE, + MONOMORPHIC_MAP, + UNDETECTABLE, + NUMBER_OF_TYPES + }; + + class Types : public EnumSet { + public: + Types() : EnumSet(0) { } + explicit Types(byte bits) : EnumSet(bits) { } + + static Types FullCompare() { + Types set; + set.Add(UNDEFINED); + set.Add(NULL_TYPE); + set.Add(UNDETECTABLE); + return set; + } + + void Print(StringStream* stream) const; }; + // At most 6 different types can be distinguished, because the Code object + // only has room for a single byte to hold a set and there are two more + // boolean flags we need to store. 
:-P + STATIC_ASSERT(NUMBER_OF_TYPES <= 6); + CompareNilICStub(EqualityKind kind, NilValue nil, Types types) - : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) { - bit_field_ = EqualityKindField::encode(kind) | - NilValueField::encode(nil) | - TypesField::encode(types); + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) { + equality_kind_ = kind; + nil_value_ = nil; } - virtual InlineCacheState GetICState() { - Types types = GetTypes(); - if (types == kFullCompare) { - return MEGAMORPHIC; - } else if ((types & kCompareAgainstMonomorphicMap) != 0) { - return MONOMORPHIC; - } else { - return PREMONOMORPHIC; - } + explicit CompareNilICStub(Code::ExtraICState ic_state) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { + equality_kind_ = EqualityKindField::decode(ic_state); + nil_value_ = NilValueField::decode(ic_state); + types_ = Types(ExtractTypesFromExtraICState(ic_state)); } - virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; } - - Handle GenerateCode(); - static Handle GetUninitialized(Isolate* isolate, EqualityKind kind, NilValue nil) { - return CompareNilICStub(kind, nil).GetCode(isolate); + return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate); } virtual void InitializeInterfaceDescriptor( @@ -1089,53 +1100,76 @@ class CompareNilICStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); static void InitializeForIsolate(Isolate* isolate) { - CompareNilICStub compare_stub(kStrictEquality, kNullValue); + CompareNilICStub compare_stub(kStrictEquality, kNullValue, + CODE_STUB_IS_MISS); compare_stub.InitializeInterfaceDescriptor( isolate, isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC)); } - virtual Code::ExtraICState GetExtraICState() { - return bit_field_; + virtual InlineCacheState GetICState() { + if (types_ == Types::FullCompare()) { + return MEGAMORPHIC; + } else if (types_.Contains(MONOMORPHIC_MAP)) { + return MONOMORPHIC; + } else { + return PREMONOMORPHIC; + } } - EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); } - NilValue GetNilValue() { return NilValueField::decode(bit_field_); } - Types GetTypes() { return TypesField::decode(bit_field_); } + virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; } - static Types TypesFromExtraICState( - Code::ExtraICState state) { - return TypesField::decode(state); + Handle GenerateCode(); + + // extra ic state = nil_value | equality_kind | type_n-1 | ... 
| type_0 + virtual Code::ExtraICState GetExtraICState() { + return NilValueField::encode(nil_value_) | + EqualityKindField::encode(equality_kind_) | + types_.ToIntegral(); } - static EqualityKind EqualityKindFromExtraICState( + static byte ExtractTypesFromExtraICState( Code::ExtraICState state) { - return EqualityKindField::decode(state); - } - static NilValue NilValueFromExtraICState(Code::ExtraICState state) { - return NilValueField::decode(state); + return state & ((1< object, - bool* already_monomorphic); + void Record(Handle object); + + bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); } + EqualityKind GetKind() const { return equality_kind_; } + NilValue GetNilValue() const { return nil_value_; } + Types GetTypes() const { return types_; } + void ClearTypes() { types_.RemoveAll(); } + void SetKind(EqualityKind kind) { equality_kind_ = kind; } + + virtual void PrintName(StringStream* stream); private: friend class CompareNilIC; - class EqualityKindField : public BitField {}; - class NilValueField : public BitField {}; - class TypesField : public BitField {}; + CompareNilICStub(EqualityKind kind, NilValue nil, + InitializationState init_state) + : HydrogenCodeStub(init_state), types_(0) { + equality_kind_ = kind; + nil_value_ = nil; + } - CompareNilICStub(EqualityKind kind, NilValue nil) - : HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) { - bit_field_ = EqualityKindField::encode(kind) | - NilValueField::encode(nil); + CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state) + : HydrogenCodeStub(init_state) { + equality_kind_ = EqualityKindField::decode(ic_state); + nil_value_ = NilValueField::decode(ic_state); + types_ = Types(ExtractTypesFromExtraICState(ic_state)); } + class EqualityKindField : public BitField { + }; + class NilValueField : public BitField {}; + virtual CodeStub::Major MajorKey() { return CompareNilIC; } - virtual int NotMissMinorKey() { return bit_field_; } + virtual int NotMissMinorKey() { return GetExtraICState(); } - int bit_field_; + EqualityKind equality_kind_; + NilValue nil_value_; + Types types_; DISALLOW_COPY_AND_ASSIGN(CompareNilICStub); }; @@ -1795,26 +1829,17 @@ class ToBooleanStub: public PlatformCodeStub { // only has room for a single byte to hold a set of these types. 
:-P STATIC_ASSERT(NUMBER_OF_TYPES <= 8); - class Types { + class Types : public EnumSet { public: Types() {} - explicit Types(byte bits) : set_(bits) {} + explicit Types(byte bits) : EnumSet(bits) {} - bool IsEmpty() const { return set_.IsEmpty(); } - bool Contains(Type type) const { return set_.Contains(type); } - bool ContainsAnyOf(Types types) const { - return set_.ContainsAnyOf(types.set_); - } - void Add(Type type) { set_.Add(type); } - byte ToByte() const { return set_.ToIntegral(); } + byte ToByte() const { return ToIntegral(); } void Print(StringStream* stream) const; void TraceTransition(Types to) const; bool Record(Handle object); bool NeedsMap() const; bool CanBeUndetectable() const; - - private: - EnumSet set_; }; static Types no_types() { return Types(); } @@ -1831,7 +1856,8 @@ class ToBooleanStub: public PlatformCodeStub { private: Major MajorKey() { return ToBoolean; } - int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); } + int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | + types_.ToByte(); } virtual void FinishCode(Handle code) { code->set_to_boolean_state(types_.ToByte()); diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index b7ff92a7c..504575803 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -103,6 +103,8 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) { code_stub_ = NULL; prologue_offset_ = kPrologueOffsetNotSet; opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count(); + no_frame_ranges_ = isolate->cpu_profiler()->is_profiling() + ? new List(2) : NULL; if (mode == STUB) { mode_ = STUB; return; @@ -121,6 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) { CompilationInfo::~CompilationInfo() { delete deferred_handles_; + delete no_frame_ranges_; } @@ -216,9 +219,8 @@ void OptimizingCompiler::RecordOptimizationStats() { double ms_optimize = static_cast(time_taken_to_optimize_) / 1000; double ms_codegen = static_cast(time_taken_to_codegen_) / 1000; if (FLAG_trace_opt) { - PrintF("[optimizing: "); - function->PrintName(); - PrintF(" / %" V8PRIxPTR, reinterpret_cast(*function)); + PrintF("[optimizing "); + function->ShortPrint(); PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize, ms_codegen); } @@ -315,15 +317,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { } // Take --hydrogen-filter into account. 
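// The open-coded name matching below moves behind
// JSFunction::PassesHydrogenFilter(); the flag keeps its semantics
// (illustrative): --hydrogen-filter=foo optimizes only functions whose
// debug name matches "foo", while the negated form --hydrogen-filter=-foo
// optimizes everything except "foo".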
- Handle name = info()->function()->debug_name(); - if (*FLAG_hydrogen_filter != '\0') { - Vector filter = CStrVector(FLAG_hydrogen_filter); - if ((filter[0] == '-' - && name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) - || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) { + if (!info()->closure()->PassesHydrogenFilter()) { info()->SetCode(code); return SetLastStatus(BAILED_OUT); - } } // Recompile the unoptimized version of the code if the current version @@ -360,6 +356,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { ASSERT(info()->shared_info()->has_deoptimization_support()); if (FLAG_trace_hydrogen) { + Handle name = info()->function()->debug_name(); PrintF("-----------------------------------------------------------\n"); PrintF("Compiling method %s using hydrogen\n", *name->ToCString()); isolate()->GetHTracer()->TraceCompilation(info()); @@ -574,6 +571,7 @@ static Handle MakeFunctionInfo(CompilationInfo* info) { : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), *result, + info, String::cast(script->name()))); GDBJIT(AddCode(Handle(String::cast(script->name())), script, @@ -586,6 +584,7 @@ static Handle MakeFunctionInfo(CompilationInfo* info) { : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), *result, + info, isolate->heap()->empty_string())); GDBJIT(AddCode(Handle(), script, info->code(), info)); } @@ -813,6 +812,10 @@ static void InstallCodeCommon(CompilationInfo* info) { // reset this bit when lazy compiling the code again. if (shared->optimization_disabled()) code->set_optimizable(false); + if (shared->code() == *code) { + // Do not send compilation event for the same code twice. + return; + } Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared); } @@ -843,9 +846,9 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) { int index = shared->SearchOptimizedCodeMap(*native_context); if (index > 0) { if (FLAG_trace_opt) { - PrintF("[found optimized code for: "); - function->PrintName(); - PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast(*function)); + PrintF("[found optimized code for "); + function->ShortPrint(); + PrintF("]\n"); } // Caching of optimized code enabled and optimized code found. shared->InstallFromOptimizedCodeMap(*function, index); @@ -1157,6 +1160,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, *shared, + info, String::cast(script->name()), line_num)); } else { @@ -1164,6 +1168,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, *shared, + info, shared->DebugName())); } } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 00074c899..dbb513ccd 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -47,6 +47,12 @@ enum ParseRestriction { ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression. }; +struct OffsetRange { + OffsetRange(int from, int to) : from(from), to(to) {} + int from; + int to; +}; + // CompilationInfo encapsulates some information known at compile time. It // is constructed based on the resources available at compile-time. class CompilationInfo { @@ -257,6 +263,20 @@ class CompilationInfo { prologue_offset_ = prologue_offset; } + // Adds offset range [from, to) where fp register does not point + // to the current frame base. Used in CPU profiler to detect stack + // samples where top frame is not set up. 
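// (A hypothetical call site: a code generator would record the prologue,
// where fp is not valid yet, as info->AddNoFrameRange(0, prologue_end),
// and the profiler then discards samples whose pc falls in that range.)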
+  inline void AddNoFrameRange(int from, int to) {
+    if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
+  }
+
+  List<OffsetRange>* ReleaseNoFrameRanges() {
+    List<OffsetRange>* result = no_frame_ranges_;
+    no_frame_ranges_ = NULL;
+    return result;
+  }
+
+
  private:
   Isolate* isolate_;
 
@@ -361,6 +381,8 @@ class CompilationInfo {
 
   int prologue_offset_;
 
+  List<OffsetRange>* no_frame_ranges_;
+
   // A copy of shared_info()->opt_count() to avoid handle deref
   // during graph optimization.
   int opt_count_;
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 434b27414..86406e5a0 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -112,6 +112,7 @@ enum BindingFlags {
   V(JSON_OBJECT_INDEX, JSObject, json_object)                                 \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function)                       \
   V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype)       \
+  V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype)         \
   V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun)                       \
   V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun)                           \
   V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun)                           \
@@ -138,9 +139,6 @@ enum BindingFlags {
   V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
   V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map,                    \
     strict_mode_function_without_prototype_map)                               \
-  V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map)                  \
-  V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map,                             \
-    strict_mode_function_instance_map)                                        \
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate)             \
   V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject,                            \
@@ -260,9 +258,8 @@ class Context: public FixedArray {
     STRICT_MODE_FUNCTION_MAP_INDEX,
     FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
     STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
-    FUNCTION_INSTANCE_MAP_INDEX,
-    STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
     INITIAL_OBJECT_PROTOTYPE_INDEX,
+    INITIAL_ARRAY_PROTOTYPE_INDEX,
     BOOLEAN_FUNCTION_INDEX,
     NUMBER_FUNCTION_INDEX,
     STRING_FUNCTION_INDEX,
@@ -434,6 +431,10 @@ class Context: public FixedArray {
     ASSERT(IsNativeContext());                  \
     set(index, value);                          \
   }                                             \
+  bool is_##name(type* value) {                 \
+    ASSERT(IsNativeContext());                  \
+    return type::cast(get(index)) == value;     \
+  }                                             \
   type* name() {                                \
     ASSERT(IsNativeContext());                  \
     return type::cast(get(index));              \
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 51d29423c..c30d4d44f 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -29,6 +29,7 @@
 
 #include "cpu-profiler-inl.h"
 
+#include "compiler.h"
 #include "frames-inl.h"
 #include "hashmap.h"
 #include "log-inl.h"
@@ -80,7 +81,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                               int line_number,
                                               Address start,
                                               unsigned size,
-                                              Address shared) {
+                                              Address shared,
+                                              CompilationInfo* info) {
   if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec;
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -88,6 +90,9 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
   rec->order = ++enqueue_order_;
   rec->start = start;
   rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number);
+  if (info) {
+    rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+  }
   rec->size = size;
   rec->shared = shared;
   events_buffer_.Enqueue(evt_rec);
@@ -323,6 +328,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
       v8::CpuProfileNode::kNoLineNumberInfo,
       code->address(),
       code->ExecutableSize(),
+      NULL,
       NULL);
 }
 
@@ -330,6 +336,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code,
                                   SharedFunctionInfo* shared,
+                                  CompilationInfo* info,
                                   Name* name) {
   processor_->CodeCreateEvent(
       tag,
@@ -338,13 +345,15 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
       v8::CpuProfileNode::kNoLineNumberInfo,
       code->address(),
       code->ExecutableSize(),
-      shared->address());
+      shared->address(),
+      info);
 }
 
 
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code,
                                   SharedFunctionInfo* shared,
+                                  CompilationInfo* info,
                                   String* source, int line) {
   processor_->CodeCreateEvent(
       tag,
@@ -353,7 +362,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
       line,
       code->address(),
       code->ExecutableSize(),
-      shared->address());
+      shared->address(),
+      info);
 }
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index da7ea6de2..2f8479fcc 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -40,6 +40,7 @@ namespace internal {
 // Forward declarations.
 class CodeEntry;
 class CodeMap;
+class CompilationInfo;
 class CpuProfile;
 class CpuProfilesCollection;
 class ProfileGenerator;
@@ -142,7 +143,8 @@ class ProfilerEventsProcessor : public Thread {
                        Name* name,
                        String* resource_name, int line_number,
                        Address start, unsigned size,
-                       Address shared);
+                       Address shared,
+                       CompilationInfo* info);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        const char* name,
                        Address start, unsigned size);
@@ -227,11 +229,13 @@ class CpuProfiler {
                        Code* code, Name* name);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        Code* code,
-                       SharedFunctionInfo* shared,
-                       Name* name);
+                       SharedFunctionInfo* shared,
+                       CompilationInfo* info,
+                       Name* name);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        Code* code,
                        SharedFunctionInfo* shared,
+                       CompilationInfo* info,
                        String* source, int line);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        Code* code, int args_count);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 1889556e1..b95432e26 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -89,38 +89,15 @@ static Handle<Value> Throw(const char* message) {
 }
 
 
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-#define FOR_EACH_STRING(V)                                  \
-  V(ArrayBuffer, "ArrayBuffer")                             \
-  V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName)  \
-  V(ArrayMarkerPropName, kArrayMarkerPropName)              \
-  V(buffer, "buffer")                                       \
-  V(byteLength, "byteLength")                               \
-  V(byteOffset, "byteOffset")                               \
-  V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT")                 \
-  V(length, "length")
-
 class PerIsolateData {
  public:
   explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
     HandleScope scope(isolate);
-#define INIT_STRING(name, value) \
-    name##_string_ = Persistent<String>::New(isolate, String::NewSymbol(value));
-    FOR_EACH_STRING(INIT_STRING)
-#undef INIT_STRING
     isolate->SetData(this);
   }
 
   ~PerIsolateData() {
-#define DISPOSE_STRING(name, value) name##_string_.Dispose(isolate_);
-    FOR_EACH_STRING(DISPOSE_STRING)
-#undef DISPOSE_STRING
     isolate_->SetData(NULL);  // Not really needed, just to be sure...
   }
 
@@ -128,13 +105,6 @@ class PerIsolateData {
     return reinterpret_cast<PerIsolateData*>(isolate->GetData());
   }
 
-#define DEFINE_STRING_GETTER(name, value)                 \
-  static Handle<String> name##_string(Isolate* isolate) { \
-    return Handle<String>(*Get(isolate)->name##_string_); \
-  }
-  FOR_EACH_STRING(DEFINE_STRING_GETTER)
-#undef DEFINE_STRING_GETTER
-
   class RealmScope {
    public:
     explicit RealmScope(PerIsolateData* data);
@@ -153,10 +123,6 @@ class PerIsolateData {
   Persistent<Context>* realms_;
   Persistent<Value> realm_shared_;
 
-#define DEFINE_MEMBER(name, value) Persistent<String> name##_string_;
-  FOR_EACH_STRING(DEFINE_MEMBER)
-#undef DEFINE_MEMBER
-
   int RealmFind(Handle<Context> context);
 };
 
@@ -561,565 +527,6 @@ Handle<Value> Shell::Load(const Arguments& args) {
   return Undefined(args.GetIsolate());
 }
 
-
-static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
-  if (value_in->IsInt32()) {
-    return value_in->Int32Value();
-  }
-
-  Local<Value> number = value_in->ToNumber();
-  if (try_catch->HasCaught()) return 0;
-
-  ASSERT(number->IsNumber());
-  Local<Int32> int32 = number->ToInt32();
-  if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
-
-  int32_t value = int32->Int32Value();
-  if (try_catch->HasCaught()) return 0;
-
-  return value;
-}
-
-
-static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
-  int32_t raw_value = convertToInt(value_in, try_catch);
-  if (try_catch->HasCaught()) return 0;
-
-  if (raw_value < 0) {
-    Throw("Array length must not be negative.");
-    return 0;
-  }
-
-  static const int kMaxLength = 0x3fffffff;
-#ifndef V8_SHARED
-  ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
-#endif  // V8_SHARED
-  if (raw_value > static_cast<int32_t>(kMaxLength)) {
-    Throw("Array length exceeds maximum length.");
-  }
-  return raw_value;
-}
-
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
-                                               Handle<Object> buffer,
-                                               int32_t length) {
-  static const int32_t kMaxSize = 0x7fffffff;
-  // Make sure the total size fits into a (signed) int.
-  if (length < 0 || length > kMaxSize) {
-    return Throw("ArrayBuffer exceeds maximum size (2G)");
-  }
-  uint8_t* data = new uint8_t[length];
-  if (data == NULL) {
-    return Throw("Memory allocation failed");
-  }
-  memset(data, 0, length);
-
-  buffer->SetHiddenValue(
-      PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True());
-  Persistent<Object> persistent_array =
-      Persistent<Object>::New(isolate, buffer);
-  persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback);
-  persistent_array.MarkIndependent(isolate);
-  isolate->AdjustAmountOfExternalAllocatedMemory(length);
-
-  buffer->SetIndexedPropertiesToExternalArrayData(
-      data, v8::kExternalByteArray, length);
-  buffer->Set(PerIsolateData::byteLength_string(isolate),
-              Int32::New(length, isolate),
-              ReadOnly);
-
-  return buffer;
-}
-
-
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
-  if (!args.IsConstructCall()) {
-    Handle<Value>* rec_args = new Handle<Value>[args.Length()];
-    for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
-    Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
-    delete[] rec_args;
-    return result;
-  }
-
-  if (args.Length() == 0) {
-    return Throw("ArrayBuffer constructor must have one argument");
-  }
-  TryCatch try_catch;
-  int32_t length = convertToUint(args[0], &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-  return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
-}
-
-
-Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
-                                          Handle<Object> array,
-                                          Handle<Object> buffer,
-                                          ExternalArrayType type,
-                                          int32_t length,
-                                          int32_t byteLength,
-                                          int32_t byteOffset,
-                                          int32_t element_size) {
-  ASSERT(element_size == 1 || element_size == 2 ||
-         element_size == 4 || element_size == 8);
-  ASSERT(byteLength == length * element_size);
-
-  void* data = buffer->GetIndexedPropertiesExternalArrayData();
-  ASSERT(data != NULL);
-
-  array->SetIndexedPropertiesToExternalArrayData(
-      static_cast<uint8_t*>(data) + byteOffset, type, length);
-  array->SetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate),
-                        Int32::New(type, isolate));
-  array->Set(PerIsolateData::byteLength_string(isolate),
-             Int32::New(byteLength, isolate),
-             ReadOnly);
-  array->Set(PerIsolateData::byteOffset_string(isolate),
-             Int32::New(byteOffset, isolate),
-             ReadOnly);
-  array->Set(PerIsolateData::length_string(isolate),
-             Int32::New(length, isolate),
-             ReadOnly);
-  array->Set(PerIsolateData::BYTES_PER_ELEMENT_string(isolate),
-             Int32::New(element_size, isolate));
-  array->Set(PerIsolateData::buffer_string(isolate),
-             buffer,
-             ReadOnly);
-
-  return array;
-}
-
-
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
-                                         ExternalArrayType type,
-                                         int32_t element_size) {
-  Isolate* isolate = args.GetIsolate();
-  if (!args.IsConstructCall()) {
-    Handle<Value>* rec_args = new Handle<Value>[args.Length()];
-    for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
-    Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
-    delete[] rec_args;
-    return result;
-  }
-
-  TryCatch try_catch;
-  ASSERT(element_size == 1 || element_size == 2 ||
-         element_size == 4 || element_size == 8);
-
-  // All of the following constructors are supported:
-  //   TypedArray(unsigned long length)
-  //   TypedArray(type[] array)
-  //   TypedArray(TypedArray array)
-  //   TypedArray(ArrayBuffer buffer,
-  //              optional unsigned long byteOffset,
-  //              optional unsigned long length)
-  Handle<Object> buffer;
-  int32_t length;
-  int32_t byteLength;
-  int32_t byteOffset;
-  bool init_from_array = false;
-  if (args.Length() == 0) {
-    return Throw("Array constructor must have at least one argument");
-  }
-  if (args[0]->IsObject() &&
-      !args[0]->ToObject()->GetHiddenValue(
-          PerIsolateData::ArrayBufferMarkerPropName_string(isolate)).IsEmpty()) {
-    // Construct from ArrayBuffer.
-    buffer = args[0]->ToObject();
-    int32_t bufferLength = convertToUint(
-        buffer->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-    if (args.Length() < 2 || args[1]->IsUndefined()) {
-      byteOffset = 0;
-    } else {
-      byteOffset = convertToUint(args[1], &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      if (byteOffset > bufferLength) {
-        return Throw("byteOffset out of bounds");
-      }
-      if (byteOffset % element_size != 0) {
-        return Throw("byteOffset must be multiple of element size");
-      }
-    }
-
-    if (args.Length() < 3 || args[2]->IsUndefined()) {
-      byteLength = bufferLength - byteOffset;
-      length = byteLength / element_size;
-      if (byteLength % element_size != 0) {
-        return Throw("buffer size must be multiple of element size");
-      }
-    } else {
-      length = convertToUint(args[2], &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      byteLength = length * element_size;
-      if (byteOffset + byteLength > bufferLength) {
-        return Throw("length out of bounds");
-      }
-    }
-  } else {
-    if (args[0]->IsObject() &&
-        args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
-      // Construct from array.
-      Local<Value> value =
-          args[0]->ToObject()->Get(PerIsolateData::length_string(isolate));
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      length = convertToUint(value, &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      init_from_array = true;
-    } else {
-      // Construct from size.
-      length = convertToUint(args[0], &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-    }
-    byteLength = length * element_size;
-    byteOffset = 0;
-
-    Handle<Object> global = Context::GetCurrent()->Global();
-    Handle<Value> array_buffer =
-        global->Get(PerIsolateData::ArrayBuffer_string(isolate));
-    ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
-    Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) };
-    Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
-        1, buffer_args);
-    if (try_catch.HasCaught()) return result;
-    buffer = result->ToObject();
-  }
-
-  Handle<Object> array =
-      CreateExternalArray(isolate, args.This(), buffer, type, length,
-                          byteLength, byteOffset, element_size);
-
-  if (init_from_array) {
-    Handle<Object> init = args[0]->ToObject();
-    for (int i = 0; i < length; ++i) {
-      Local<Value> value = init->Get(i);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      array->Set(i, value);
-    }
-  }
-
-  return array;
-}
-
-
-Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
-  TryCatch try_catch;
-
-  if (!args.This()->IsObject()) {
-    return Throw("'slice' invoked on non-object receiver");
-  }
-
-  Isolate* isolate = args.GetIsolate();
-  Local<Object> self = args.This();
-  Local<Value> marker = self->GetHiddenValue(
-      PerIsolateData::ArrayBufferMarkerPropName_string(isolate));
-  if (marker.IsEmpty()) {
-    return Throw("'slice' invoked on wrong receiver type");
-  }
-
-  int32_t length = convertToUint(
-      self->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-  if (args.Length() == 0) {
-    return Throw("'slice' must have at least one argument");
-  }
-  int32_t begin = convertToInt(args[0], &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  if (begin < 0) begin += length;
-  if (begin < 0) begin = 0;
-  if (begin > length) begin = length;
-
-  int32_t end;
-  if (args.Length() < 2 || args[1]->IsUndefined()) {
-    end = length;
-  } else {
-    end = convertToInt(args[1], &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    if (end < 0) end += length;
-    if (end < 0) end = 0;
-    if (end > length) end = length;
-    if (end < begin) end = begin;
-  }
-
-  Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
-  Handle<Value> new_args[] = { Uint32::New(end - begin, isolate) };
-  Handle<Value> result = constructor->NewInstance(1, new_args);
-  if (try_catch.HasCaught()) return result;
-  Handle<Object> buffer = result->ToObject();
-  uint8_t* dest =
-      static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
-  uint8_t* src = begin + static_cast<uint8_t*>(
-      self->GetIndexedPropertiesExternalArrayData());
-  memcpy(dest, src, end - begin);
-
-  return buffer;
-}
-
-
-Handle<Value> Shell::ArraySubArray(const Arguments& args) {
-  TryCatch try_catch;
-
-  if (!args.This()->IsObject()) {
-    return Throw("'subarray' invoked on non-object receiver");
-  }
-
-  Isolate* isolate = args.GetIsolate();
-  Local<Object> self = args.This();
-  Local<Value> marker =
-      self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
-  if (marker.IsEmpty()) {
-    return Throw("'subarray' invoked on wrong receiver type");
-  }
-
-  Handle<Object> buffer =
-      self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  int32_t length = convertToUint(
-      self->Get(PerIsolateData::length_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  int32_t byteOffset = convertToUint(
-      self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  int32_t element_size = convertToUint(
-      self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-  if (args.Length() == 0) {
-    return Throw("'subarray' must have at least one argument");
-  }
-  int32_t begin = convertToInt(args[0], &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  if (begin < 0) begin += length;
-  if (begin < 0) begin = 0;
-  if (begin > length) begin = length;
-
-  int32_t end;
-  if (args.Length() < 2 || args[1]->IsUndefined()) {
-    end = length;
-  } else {
-    end = convertToInt(args[1], &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    if (end < 0) end += length;
-    if (end < 0) end = 0;
-    if (end > length) end = length;
-    if (end < begin) end = begin;
-  }
-
-  length = end - begin;
-  byteOffset += begin * element_size;
-
-  Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
-  Handle<Value> construct_args[] = {
-      buffer, Uint32::New(byteOffset, isolate), Uint32::New(length, isolate)
-  };
-  return constructor->NewInstance(3, construct_args);
-}
-
-
-Handle<Value> Shell::ArraySet(const Arguments& args) {
-  TryCatch try_catch;
-
-  if (!args.This()->IsObject()) {
-    return Throw("'set' invoked on non-object receiver");
-  }
-
-  Isolate* isolate = args.GetIsolate();
-  Local<Object> self = args.This();
-  Local<Value> marker =
-      self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
-  if (marker.IsEmpty()) {
-    return Throw("'set' invoked on wrong receiver type");
-  }
-  int32_t length = convertToUint(
-      self->Get(PerIsolateData::length_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-  int32_t element_size = convertToUint(
-      self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-  if (args.Length() == 0) {
-    return Throw("'set' must have at least one argument");
-  }
-  if (!args[0]->IsObject() ||
-      !args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
-    return Throw("'set' invoked with non-array argument");
-  }
-  Handle<Object> source = args[0]->ToObject();
-  int32_t source_length = convertToUint(
-      source->Get(PerIsolateData::length_string(isolate)), &try_catch);
-  if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-  int32_t offset;
-  if (args.Length() < 2 || args[1]->IsUndefined()) {
-    offset = 0;
-  } else {
-    offset = convertToUint(args[1], &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-  }
-  if (offset + source_length > length) {
-    return Throw("offset or source length out of bounds");
-  }
-
-  int32_t source_element_size;
-  if (source->GetHiddenValue(
-          PerIsolateData::ArrayMarkerPropName_string(isolate)).IsEmpty()) {
-    source_element_size = 0;
-  } else {
-    source_element_size = convertToUint(
-        source->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)),
-        &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-  }
-
-  if (element_size == source_element_size &&
-      self->GetConstructor()->StrictEquals(source->GetConstructor())) {
-    // Use memmove on the array buffers.
-    Handle<Object> buffer =
-        self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    Handle<Object> source_buffer =
-        source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    int32_t byteOffset = convertToUint(
-        self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    int32_t source_byteOffset = convertToUint(
-        source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-    uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
-        buffer->GetIndexedPropertiesExternalArrayData());
-    uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
-        source_buffer->GetIndexedPropertiesExternalArrayData());
-    memmove(dest, src, source_length * element_size);
-  } else if (source_element_size == 0) {
-    // Source is not a typed array, copy element-wise sequentially.
-    for (int i = 0; i < source_length; ++i) {
-      self->Set(offset + i, source->Get(i));
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-    }
-  } else {
-    // Need to copy element-wise to make the right conversions.
-    Handle<Object> buffer =
-        self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-    Handle<Object> source_buffer =
-        source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
-    if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-    if (buffer->StrictEquals(source_buffer)) {
-      // Same backing store, need to handle overlap correctly.
-      // This gets a bit tricky in the case of different element sizes
-      // (which, of course, is extremely unlikely to ever occur in practice).
-      int32_t byteOffset = convertToUint(
-          self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-      int32_t source_byteOffset = convertToUint(
-          source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
-      if (try_catch.HasCaught()) return try_catch.ReThrow();
-
-      // Copy as much as we can from left to right.
-      int i = 0;
-      int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
-      int32_t next_src_offset = source_byteOffset + source_element_size;
-      while (i < length && next_dest_offset <= next_src_offset) {
-        self->Set(offset + i, source->Get(i));
-        ++i;
-        next_dest_offset += element_size;
-        next_src_offset += source_element_size;
-      }
-      // Of what's left, copy as much as we can from right to left.
-      int j = length - 1;
-      int32_t dest_offset = byteOffset + (offset + j) * element_size;
-      int32_t src_offset = source_byteOffset + j * source_element_size;
-      while (j >= i && dest_offset >= src_offset) {
-        self->Set(offset + j, source->Get(j));
-        --j;
-        dest_offset -= element_size;
-        src_offset -= source_element_size;
-      }
-      // There can be at most 8 entries left in the middle that need buffering
-      // (because the largest element_size is 8 times the smallest).
-      ASSERT(j+1 - i <= 8);
-      Handle<Value> temp[8];
-      for (int k = i; k <= j; ++k) {
-        temp[k - i] = source->Get(k);
-      }
-      for (int k = i; k <= j; ++k) {
-        self->Set(offset + k, temp[k - i]);
-      }
-    } else {
-      // Different backing stores, safe to copy element-wise sequentially.
-      for (int i = 0; i < source_length; ++i)
-        self->Set(offset + i, source->Get(i));
-    }
-  }
-
-  return Undefined(args.GetIsolate());
-}
-
-
-void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
-                                      Persistent<Object>* object,
-                                      uint8_t* data) {
-  HandleScope scope(isolate);
-  int32_t length = (*object)->Get(
-      PerIsolateData::byteLength_string(isolate))->Uint32Value();
-  isolate->AdjustAmountOfExternalAllocatedMemory(-length);
-  delete[] data;
-  object->Dispose(isolate);
-}
-
-
-Handle<Value> Shell::Int8Array(const Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
-}
-
-
-Handle<Value> Shell::Uint8Array(const Arguments& args) {
-  return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Int16Array(const Arguments& args) {
-  return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
-}
-
-
-Handle<Value> Shell::Uint16Array(const Arguments& args) {
-  return CreateExternalArray(
-      args, kExternalUnsignedShortArray, sizeof(uint16_t));
-}
-
-
-Handle<Value> Shell::Int32Array(const Arguments& args) {
-  return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
-}
-
-
-Handle<Value> Shell::Uint32Array(const Arguments& args) {
-  return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
-}
-
-
-Handle<Value> Shell::Float32Array(const Arguments& args) {
-  return CreateExternalArray(
-      args, kExternalFloatArray, sizeof(float));  // NOLINT
-}
-
-
-Handle<Value> Shell::Float64Array(const Arguments& args) {
-  return CreateExternalArray(
-      args, kExternalDoubleArray, sizeof(double));  // NOLINT
-}
-
-
-Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
-  return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
-}
-
 
 Handle<Value> Shell::Quit(const Arguments& args) {
   int exit_code = args[0]->Int32Value();
@@ -1412,26 +819,6 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
 #endif
 
 
-Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
-    InvocationCallback fun) {
-  Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
-  Local