Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/api.cc | 220
-rw-r--r--  deps/v8/src/api.h | 67
-rw-r--r--  deps/v8/src/arguments.h | 21
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 76
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 9
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 72
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 19
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 60
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 19
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 146
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 2
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 42
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 4
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 136
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 6
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 246
-rw-r--r--  deps/v8/src/arraybuffer.js | 100
-rw-r--r--  deps/v8/src/assembler.cc | 7
-rw-r--r--  deps/v8/src/assembler.h | 2
-rw-r--r--  deps/v8/src/ast.h | 19
-rw-r--r--  deps/v8/src/atomicops_internals_x86_gcc.h | 2
-rw-r--r--  deps/v8/src/bootstrapper.cc | 131
-rw-r--r--  deps/v8/src/builtins-decls.h | 40
-rw-r--r--  deps/v8/src/builtins.cc | 115
-rw-r--r--  deps/v8/src/builtins.h | 6
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 81
-rw-r--r--  deps/v8/src/code-stubs.cc | 4
-rw-r--r--  deps/v8/src/code-stubs.h | 122
-rw-r--r--  deps/v8/src/compiler.cc | 7
-rw-r--r--  deps/v8/src/contexts.h | 20
-rw-r--r--  deps/v8/src/d8-debug.cc | 12
-rw-r--r--  deps/v8/src/d8.cc | 164
-rw-r--r--  deps/v8/src/d8.h | 12
-rw-r--r--  deps/v8/src/debug.cc | 7
-rw-r--r--  deps/v8/src/debug.h | 1
-rw-r--r--  deps/v8/src/factory.cc | 60
-rw-r--r--  deps/v8/src/factory.h | 5
-rw-r--r--  deps/v8/src/flag-definitions.h | 10
-rw-r--r--  deps/v8/src/frames-inl.h | 34
-rw-r--r--  deps/v8/src/frames.cc | 120
-rw-r--r--  deps/v8/src/frames.h | 16
-rw-r--r--  deps/v8/src/full-codegen.h | 5
-rw-r--r--  deps/v8/src/global-handles.cc | 19
-rw-r--r--  deps/v8/src/global-handles.h | 4
-rw-r--r--  deps/v8/src/handles-inl.h | 15
-rw-r--r--  deps/v8/src/heap-inl.h | 42
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 5
-rw-r--r--  deps/v8/src/heap.cc | 19
-rw-r--r--  deps/v8/src/heap.h | 28
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 95
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 198
-rw-r--r--  deps/v8/src/hydrogen.cc | 439
-rw-r--r--  deps/v8/src/hydrogen.h | 10
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 75
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 9
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 71
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 20
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 166
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 2
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 106
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 29
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 40
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 256
-rw-r--r--  deps/v8/src/ic.cc | 208
-rw-r--r--  deps/v8/src/ic.h | 8
-rw-r--r--  deps/v8/src/incremental-marking.cc | 29
-rw-r--r--  deps/v8/src/incremental-marking.h | 11
-rw-r--r--  deps/v8/src/json-parser.h | 65
-rw-r--r--  deps/v8/src/json-stringifier.h | 2
-rw-r--r--  deps/v8/src/list-inl.h | 7
-rw-r--r--  deps/v8/src/list.h | 3
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 90
-rw-r--r--  deps/v8/src/lithium-allocator.h | 26
-rw-r--r--  deps/v8/src/lithium.cc | 15
-rw-r--r--  deps/v8/src/lithium.h | 187
-rw-r--r--  deps/v8/src/macros.py | 2
-rw-r--r--  deps/v8/src/messages.js | 10
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 87
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 9
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 75
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 19
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 97
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 2
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 60
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 19
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 42
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 4
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 246
-rw-r--r--  deps/v8/src/mksnapshot.cc | 44
-rw-r--r--  deps/v8/src/object-observe.js | 8
-rw-r--r--  deps/v8/src/objects-debug.cc | 5
-rw-r--r--  deps/v8/src/objects-inl.h | 134
-rw-r--r--  deps/v8/src/objects-printer.cc | 2
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 6
-rw-r--r--  deps/v8/src/objects.cc | 1062
-rw-r--r--  deps/v8/src/objects.h | 139
-rw-r--r--  deps/v8/src/parser.cc | 46
-rw-r--r--  deps/v8/src/parser.h | 9
-rw-r--r--  deps/v8/src/platform-posix.cc | 17
-rw-r--r--  deps/v8/src/prettyprinter.cc | 86
-rw-r--r--  deps/v8/src/prettyprinter.h | 2
-rw-r--r--  deps/v8/src/property-details.h | 109
-rw-r--r--  deps/v8/src/property.cc | 1
-rw-r--r--  deps/v8/src/property.h | 48
-rw-r--r--  deps/v8/src/runtime.cc | 315
-rw-r--r--  deps/v8/src/runtime.h | 2
-rw-r--r--  deps/v8/src/sampler.cc | 3
-rw-r--r--  deps/v8/src/scopes.cc | 3
-rw-r--r--  deps/v8/src/serialize.cc | 7
-rw-r--r--  deps/v8/src/spaces.cc | 18
-rw-r--r--  deps/v8/src/string-stream.cc | 5
-rw-r--r--  deps/v8/src/stub-cache.cc | 93
-rw-r--r--  deps/v8/src/stub-cache.h | 37
-rw-r--r--  deps/v8/src/third_party/vtune/v8-vtune.h | 2
-rw-r--r--  deps/v8/src/third_party/vtune/vtune-jit.cc | 6
-rw-r--r--  deps/v8/src/transitions.cc | 1
-rw-r--r--  deps/v8/src/type-info.cc | 17
-rw-r--r--  deps/v8/src/typedarray.js | 217
-rw-r--r--  deps/v8/src/v8.h | 3
-rw-r--r--  deps/v8/src/v8memory.h | 8
-rw-r--r--  deps/v8/src/v8natives.js | 6
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 16
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 93
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 9
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 73
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 20
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 157
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 2
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 95
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 24
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 43
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 193
135 files changed, 6047 insertions, 2369 deletions
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 2b24ab07f..8a6eaf476 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "api.h"
#include <string.h> // For memcpy, strlen.
@@ -625,7 +628,7 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
void V8::MakeWeak(i::Isolate* isolate,
i::Object** object,
void* parameters,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
@@ -2409,6 +2412,46 @@ bool Value::IsArray() const {
}
+bool Value::IsArrayBuffer() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
+ return false;
+ return Utils::OpenHandle(this)->IsJSArrayBuffer();
+}
+
+
+bool Value::IsTypedArray() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
+ return false;
+ return Utils::OpenHandle(this)->IsJSTypedArray();
+}
+
+
+#define TYPED_ARRAY_LIST(F) \
+F(Uint8Array, kExternalUnsignedByteArray) \
+F(Int8Array, kExternalByteArray) \
+F(Uint16Array, kExternalUnsignedShortArray) \
+F(Int16Array, kExternalShortArray) \
+F(Uint32Array, kExternalUnsignedIntArray) \
+F(Int32Array, kExternalIntArray) \
+F(Float32Array, kExternalFloatArray) \
+F(Float64Array, kExternalDoubleArray) \
+F(Uint8ClampedArray, kExternalPixelArray)
+
+
+#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
+ bool Value::Is##TypedArray() const { \
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
+ return false; \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ if (!obj->IsJSTypedArray()) return false; \
+ return i::JSTypedArray::cast(*obj)->type() == type_const; \
+ }
+
+TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
+
+#undef VALUE_IS_TYPED_ARRAY
+
+
bool Value::IsObject() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
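The TYPED_ARRAY_LIST / VALUE_IS_TYPED_ARRAY pair added above is an X-macro: one list macro is expanded against different per-entry macros to stamp out a family of near-identical functions (here, one Is##TypedArray predicate per external-array type). A minimal self-contained sketch of the pattern, with illustrative names that are not part of V8:

#include <cstdio>

#define SHAPE_LIST(F) \
  F(Circle, 0)        \
  F(Square, 1)

#define DEFINE_IS_SHAPE(Shape, kind_const) \
  bool Is##Shape(int kind) { return kind == kind_const; }

SHAPE_LIST(DEFINE_IS_SHAPE)  // Expands to IsCircle() and IsSquare().
#undef DEFINE_IS_SHAPE

int main() {
  std::printf("%d %d\n", IsCircle(0), IsSquare(0));  // prints "1 0"
  return 0;
}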
@@ -2755,6 +2798,32 @@ void v8::ArrayBuffer::CheckCast(Value* that) {
}
+void v8::TypedArray::CheckCast(Value* that) {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSTypedArray(),
+ "v8::TypedArray::Cast()",
+ "Could not convert to TypedArray");
+}
+
+
+#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
+ void v8::ApiClass::CheckCast(Value* that) { \
+ if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()")) \
+ return; \
+ i::Handle<i::Object> obj = Utils::OpenHandle(that); \
+ ApiCheck(obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == typeConst, \
+ "v8::" #ApiClass "::Cast()", \
+ "Could not convert to " #ApiClass); \
+ }
+
+
+TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
+
+#undef CHECK_TYPED_ARRAY_CAST
+
+
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@@ -3281,7 +3350,7 @@ Local<String> v8::Object::ObjectProtoToString() {
const char* postfix = "]";
int prefix_len = i::StrLength(prefix);
- int str_len = str->Length();
+ int str_len = str->Utf8Length();
int postfix_len = i::StrLength(postfix);
int buf_len = prefix_len + str_len + postfix_len;
@@ -3293,7 +3362,7 @@ Local<String> v8::Object::ObjectProtoToString() {
ptr += prefix_len;
// Write real content.
- str->WriteAscii(ptr, 0, str_len);
+ str->WriteUtf8(ptr, str_len);
ptr += str_len;
// Write postfix.
@@ -4061,7 +4130,7 @@ bool String::IsOneByte() const {
if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
return false;
}
- return str->IsOneByteConvertible();
+ return str->HasOnlyOneByteChars();
}
@@ -5806,6 +5875,131 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
}
+Local<ArrayBuffer> v8::TypedArray::Buffer() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::Buffer()"))
+ return Local<ArrayBuffer>();
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ ASSERT(obj->buffer()->IsJSArrayBuffer());
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ return Utils::ToLocal(buffer);
+}
+
+
+size_t v8::TypedArray::ByteOffset() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::ByteOffset()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_offset()->Number());
+}
+
+
+size_t v8::TypedArray::ByteLength() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::ByteLength()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+size_t v8::TypedArray::Length() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->length()->Number());
+}
+
+
+void* v8::TypedArray::BaseAddress() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::BaseAddress()")) return NULL;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ void* buffer_data = buffer->backing_store();
+ size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
+ return static_cast<uint8_t*>(buffer_data) + byte_offset;
+}
+
+
+template<typename ElementType,
+ ExternalArrayType array_type,
+ i::ElementsKind elements_kind>
+i::Handle<i::JSTypedArray> NewTypedArray(
+ i::Isolate* isolate,
+ Handle<ArrayBuffer> array_buffer, size_t byte_offset, size_t length) {
+ i::Handle<i::JSTypedArray> obj =
+ isolate->factory()->NewJSTypedArray(array_type);
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+
+ ASSERT(byte_offset % sizeof(ElementType) == 0);
+ ASSERT(byte_offset + length * sizeof(ElementType) <=
+ static_cast<size_t>(buffer->byte_length()->Number()));
+
+ obj->set_buffer(*buffer);
+
+ i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
+ static_cast<double>(byte_offset));
+ obj->set_byte_offset(*byte_offset_object);
+
+ i::Handle<i::Object> byte_length_object = isolate->factory()->NewNumber(
+ static_cast<double>(length * sizeof(ElementType)));
+ obj->set_byte_length(*byte_length_object);
+
+ i::Handle<i::Object> length_object = isolate->factory()->NewNumber(
+ static_cast<double>(length));
+ obj->set_length(*length_object);
+
+ i::Handle<i::ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ i::Handle<i::Map> map =
+ isolate->factory()->GetElementsTransitionMap(
+ obj, elements_kind);
+ obj->set_map(*map);
+ obj->set_elements(*elements);
+ return obj;
+}
+
+
+#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
+ Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
+ size_t byte_offset, size_t length) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ EnsureInitializedForIsolate(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ LOG_API(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ ENTER_V8(isolate); \
+ i::Handle<i::JSTypedArray> obj = \
+ NewTypedArray<element_type, array_type, elements_kind>( \
+ isolate, array_buffer, byte_offset, length); \
+ return Utils::ToLocal##TypedArray(obj); \
+ }
+
+
+TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
+ i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
+ i::EXTERNAL_PIXEL_ELEMENTS)
+TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
+ i::EXTERNAL_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
+ i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
+ i::EXTERNAL_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
+ i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
+ i::EXTERNAL_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
+ i::EXTERNAL_FLOAT_ELEMENTS)
+TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
+ i::EXTERNAL_DOUBLE_ELEMENTS)
+
+#undef TYPED_ARRAY_NEW
+
+
Local<Symbol> v8::Symbol::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
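A hedged sketch of how an embedder might call the constructors defined by TYPED_ARRAY_NEW above. It assumes a V8 build that includes this patch; ArrayBuffer::New(void*, size_t) is the externalized-buffer overload visible in the hunk header. Error handling is omitted:

#include <v8.h>

void CreateViewExample() {
  v8::HandleScope scope;
  static double backing[16];  // embedder-owned store; must outlive the buffer
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(backing, sizeof(backing));
  // View 8 doubles starting 4 doubles into the buffer; NewTypedArray
  // asserts that byte_offset is element-aligned and the view fits.
  v8::Local<v8::Float64Array> view =
      v8::Float64Array::New(buffer, 4 * sizeof(double), 8);
  // Per BaseAddress() above: backing_store + ByteOffset().
  double* data = static_cast<double*>(view->BaseAddress());
  data[0] = 1.0;  // writes backing[4]
}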
@@ -5883,6 +6077,19 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
}
+#ifdef DEBUG
+v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
+ : isolate_(isolate),
+ last_state_(i::EnterAllocationScope(
+ reinterpret_cast<i::Isolate*>(isolate), false)) {
+}
+
+v8::AssertNoGCScope::~AssertNoGCScope() {
+ i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
+}
+#endif
+
+
void V8::IgnoreOutOfMemoryException() {
EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
}
@@ -6295,9 +6502,10 @@ String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
- length_ = str->Length();
+ length_ = str->Utf8Length();
str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
+ str->WriteUtf8(str_);
+ ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
}
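The Length() to Utf8Length() changes above matter because Length() counts UTF-16 code units while WriteUtf8 produces bytes, and the UTF-8 byte count can be larger for non-ASCII content. A sketch of the embedder-side pattern under the same era's API:

#include <v8.h>

// Copies |s| into a heap-allocated, NUL-terminated UTF-8 buffer;
// the caller owns the result.
char* ToUtf8(v8::Handle<v8::String> s) {
  int bytes = s->Utf8Length();   // UTF-8 byte count, not s->Length()
  char* buf = new char[bytes + 1];
  s->WriteUtf8(buf, bytes);      // writes at most |bytes| bytes
  buf[bytes] = '\0';
  return buf;
}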
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index f62541dc0..686abf75c 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -171,6 +171,16 @@ class RegisteredExtension {
V(Object, JSObject) \
V(Array, JSArray) \
V(ArrayBuffer, JSArrayBuffer) \
+ V(TypedArray, JSTypedArray) \
+ V(Uint8Array, JSTypedArray) \
+ V(Uint8ClampedArray, JSTypedArray) \
+ V(Int8Array, JSTypedArray) \
+ V(Uint16Array, JSTypedArray) \
+ V(Int16Array, JSTypedArray) \
+ V(Uint32Array, JSTypedArray) \
+ V(Int32Array, JSTypedArray) \
+ V(Float32Array, JSTypedArray) \
+ V(Float64Array, JSTypedArray) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
@@ -208,6 +218,28 @@ class Utils {
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+
+ static inline Local<TypedArray> ToLocal(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8Array> ToLocalUint8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int8Array> ToLocalInt8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint16Array> ToLocalUint16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int16Array> ToLocalInt16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint32Array> ToLocalUint32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int32Array> ToLocalInt32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float32Array> ToLocalFloat32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float64Array> ToLocalFloat64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -262,14 +294,34 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
}
+class InternalHandleHelper {
+ public:
+ template<class From, class To>
+ static inline Local<To> Convert(v8::internal::Handle<From> obj) {
+ return Local<To>(reinterpret_cast<To*>(obj.location()));
+ }
+};
+
+
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return Local<To>(reinterpret_cast<To*>(obj.location())); \
+ return InternalHandleHelper::Convert<v8::internal::From, v8::To>(obj); \
}
+
+#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
+ Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
+ v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
+ ASSERT(obj.is_null() || !obj->IsTheHole()); \
+ ASSERT(obj->type() == typeConst); \
+ return InternalHandleHelper:: \
+ Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
+ }
+
+
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
@@ -279,6 +331,18 @@ MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
+
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
+
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -293,6 +357,7 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
+#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
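InternalHandleHelper::Convert above works because an internal Handle<T> and an API Local<S> are both thin wrappers around the same slot pointer, so conversion is a reinterpret_cast of that slot address. A toy model of the layout assumption (types are illustrative, not V8's):

// Both handle types store only the address of a slot holding the object.
template <class T> struct InternalHandle {
  explicit InternalHandle(T** loc) : location_(loc) {}
  T** location() const { return location_; }
  T** location_;
};

template <class T> struct ApiLocal {
  explicit ApiLocal(T* slot) : val_(slot) {}
  T* val_;  // really the slot address, as in v8::Local
};

template <class From, class To>
ApiLocal<To> Convert(InternalHandle<From> h) {
  // Same trick as MAKE_TO_LOCAL: reinterpret the slot address.
  return ApiLocal<To>(reinterpret_cast<To*>(h.location()));
}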
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index f8fb00c57..1423d5642 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -115,15 +115,18 @@ class CustomArguments : public Relocatable {
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
+Type Name(int args_length, Object** args_object, Isolate* isolate)
+
+#define RUNTIME_FUNCTION(Type, Name) \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
+Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+} \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
+
+#define RUNTIME_ARGUMENTS(isolate, args) \
+ args.length(), args.arguments(), isolate
} } // namespace v8::internal
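The new RUNTIME_FUNCTION above splits each runtime function into a raw entry point taking (length, args pointer, isolate) and a static __RT_impl_* body that receives a materialized Arguments object, so the Arguments wrapper is always constructed on the callee side of the boundary. A minimal sketch of the wrapper-macro pattern, with illustrative names and types:

struct Args {
  Args(int length, int* values) : length_(length), values_(values) {}
  int length() const { return length_; }
  int* arguments() const { return values_; }
  int length_;
  int* values_;
};

#define MY_RUNTIME_FUNCTION(Name)                 \
  static int Impl_##Name(Args args);              \
  int Name(int args_length, int* args_values) {   \
    Args args(args_length, args_values);          \
    return Impl_##Name(args);                     \
  }                                               \
  static int Impl_##Name(Args args)

// The body after the macro becomes Impl_Sum; callers invoke Sum().
MY_RUNTIME_FUNCTION(Sum) {
  int total = 0;
  for (int i = 0; i < args.length(); i++) total += args.arguments()[i];
  return total;
}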
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index cc6caca3d..86da76ac3 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -73,6 +74,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -115,9 +138,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// r0 -- number of arguments
+ // r1 -- function
// r2 -- type info cell with elements kind
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r1, r2 };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &r0;
@@ -3776,12 +3800,6 @@ Register InstanceofStub::left() { return r0; }
Register InstanceofStub::right() { return r1; }
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -4733,6 +4751,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(r3, &miss);
__ cmp(r3, Operand(terminal_kind_sentinel));
__ b(gt, &miss);
// Make sure the function is the Array() function
@@ -5941,8 +5960,36 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ mov(r4, Operand(high_promotion_mode));
+ __ ldr(r4, MemOperand(r4, 0));
+ __ cmp(r4, Operand::Zero());
+ __ b(eq, &skip_write_barrier);
+
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ RecordWriteField(r7,
+ ConsString::kFirstOffset,
+ r0,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ RecordWriteField(r7,
+ ConsString::kSecondOffset,
+ r1,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
__ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
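What the StringAddStub hunk above encodes, in pseudo-C++: when new-space high promotion mode is active, the cons string was pretenured into old space, so storing its (possibly young) halves must emit remembered-set write barriers; otherwise the string sits in new space and the barriers can be skipped. The types and RecordWrite below are illustrative stand-ins, not V8's:

struct ConsString { void* first; void* second; };
void RecordWrite(void* host, void** slot);  // remembered-set barrier (stub)

void StoreConsFields(ConsString* s, void* first, void* second,
                     bool high_promotion_mode) {
  if (high_promotion_mode) {   // string pretenured into old space
    s->first = first;
    RecordWrite(s, &s->first);
    s->second = second;
    RecordWrite(s, &s->second);
  } else {                     // string in new space: no barrier needed
    s->first = first;
    s->second = second;
  }
}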
@@ -6788,6 +6835,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7312,14 +7362,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, kPointerSize));
-
- // There is no info if the call site went megamorphic either
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ cmp(r3,
- Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
- __ b(eq, &no_info);
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(r3, &no_info);
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 848fae20d..6bfaf414c 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -224,6 +224,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 0ef4be064..0bc1f48c8 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -1593,7 +1593,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@@ -1939,11 +1940,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ b(ne, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1955,18 +1957,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2074,6 +2065,55 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ mov(r1, Operand(map));
+ __ pop(r2);
+ __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2,
+ FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ str(r3,
+ FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
+ r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
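EmitReturnIteratorResult above materializes a { value, done } iterator-result object inline: five stores matching the asserted instance size of 5 * kPointerSize, with a write barrier only for the value field, since the map, empty_fixed_array, and the boolean are all in the root set. The layout it fills, as an illustrative struct (word offsets only, not authoritative V8 constants):

struct IterResult {   // instance_size == 5 words, per the ASSERT above
  void* map;          // word 0: native context's generator_result_map
  void* properties;   // word 1: empty_fixed_array (root)
  void* elements;     // word 2: empty_fixed_array (root)
  void* value;        // word 3: yielded value, needs a write barrier
  void* done;         // word 4: true/false (root), no barrier
};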
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 893ac4e11..c644be59d 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1180,6 +1180,25 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r2 : key
+ // -- r1 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r1, r2, r0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 66c108d4f..3fe46ffd7 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -552,6 +552,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -672,7 +677,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -1300,8 +1305,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1479,15 +1484,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
+ left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@@ -1597,8 +1602,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1629,8 +1634,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2114,8 +2119,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2150,12 +2155,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2319,14 +2318,25 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2378,7 +2388,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
@@ -2440,7 +2452,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index d81881e6f..116d57621 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -122,7 +122,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1574,18 +1573,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@@ -2142,6 +2129,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2787,6 +2777,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 29e01b918..3a0f476b5 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -91,6 +91,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1161,14 +1165,14 @@ void LCodeGen::DoModI(LModI* instr) {
Register result = ToRegister(instr->result());
Label done;
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm(), SUDIV);
// Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm(), SUDIV);
// Check for (kMinInt % -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
@@ -1185,12 +1189,12 @@ void LCodeGen::DoModI(LModI* instr) {
__ sdiv(result, left, right);
__ mls(result, result, right, left);
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ __ cmp(result, Operand::Zero());
+ __ b(ne, &done);
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
}
} else {
Register scratch = scratch0();
@@ -1206,13 +1210,7 @@ void LCodeGen::DoModI(LModI* instr) {
ASSERT(!scratch.is(right));
ASSERT(!scratch.is(result));
- Label vfp_modulo, both_positive, right_negative;
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
+ Label vfp_modulo, right_negative;
__ Move(result, left);
@@ -1230,7 +1228,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
scratch,
&right_negative,
- &both_positive);
+ &vfp_modulo);
// Perform modulo operation (scratch contains right - 1).
__ and_(result, scratch, Operand(left));
__ b(&done);
@@ -1239,23 +1237,6 @@ void LCodeGen::DoModI(LModI* instr) {
// Negate right. The sign of the divisor does not matter.
__ rsb(right, right, Operand::Zero());
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
__ bind(&vfp_modulo);
// Load the arguments in VFP registers.
// The divisor value is preloaded before. Be careful that 'right'
@@ -3076,13 +3057,20 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ __ ldr(result, FieldMemOperand(object, offset));
} else {
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ __ ldr(result, FieldMemOperand(result, offset));
}
}
@@ -3228,40 +3216,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains |input|'s map.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -4234,8 +4188,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4262,15 +4215,34 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
Register scratch = scratch0();
int offset = instr->offset();
- ASSERT(!object.is(value));
+ Handle<Map> transition = instr->transition();
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
+ if (FLAG_track_fields && representation.IsSmi()) {
+ Register value = ToRegister(instr->value());
+ __ SmiTag(value, value, SetCC);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ vstr(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
+ __ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -4287,6 +4259,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -5138,6 +5112,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5473,7 +5449,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@@ -5482,8 +5457,16 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ mov(result, Operand(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
@@ -5566,7 +5549,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ae175e52d..294dcf205 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -57,6 +57,7 @@ class LCodeGen BASE_EMBEDDED {
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -418,6 +419,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b7cd3db04..6e0b4a704 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1933,8 +1933,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ mov(scratch1, Operand(high_promotion_mode));
+ ldr(scratch1, MemOperand(scratch1, 0));
+ cmp(scratch1, Operand::Zero());
+ b(eq, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
InitializeNewString(result,
length,
@@ -3473,6 +3499,18 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ b(ne, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
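CheckMapDeprecated above is the assembly form of a one-bit test on the map's bit_field3, emitted only when the map could ever be deprecated. A self-contained C++ rendering of the same check; the bit position is illustrative, and only the field and flag names follow the diff:

struct Map {
  int bit_field3;                             // Smi-tagged flag word
  static const int kDeprecatedMask = 1 << 5;  // illustrative bit position
};

// Stores through a deprecated transition map must take the miss path.
bool IsDeprecated(const Map* map) {
  return (map->bit_field3 & Map::kDeprecatedMask) != 0;
}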
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index b736c8f3a..90272911c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -213,6 +213,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 036fd7f87..af65bc70b 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -975,12 +975,14 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}
-// For use in calls that take two double values, constructed either
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here
// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
+ *z = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -988,44 +990,12 @@ void Simulator::GetFpArgs(double* x, double* y) {
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
- OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
- OS::MemCopy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
- // Register 2 -> y.
+ // Registers 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
+ // Register 2 -> z
+ memcpy(buffer, registers_ + 2, sizeof(*z));
+ memcpy(z, buffer, sizeof(*z));
}
}
@@ -1648,10 +1618,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
@@ -1717,27 +1689,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@@ -1749,22 +1721,54 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_register(r0, lo_res);
- set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 674ff42d6..45ae999b5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -348,10 +348,8 @@ class Simulator {
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index ddcbd623b..127bf3fdd 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -315,11 +315,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -451,8 +453,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// r0 : value
Label exit;
@@ -465,6 +469,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -480,7 +493,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -499,6 +512,30 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ vmov(s0, scratch1);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_restore_name, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
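The VFP sequence above normalizes the incoming value before boxing it: a smi is untagged and converted to a double, while anything else must already be a heap number whose payload is loaded directly. The same split in plain C++, assuming a 32-bit smi encoding with a zero low tag bit (a sketch, not V8's representation code):

#include <cstdint>

static bool IsSmi(intptr_t value) { return (value & 1) == 0; }

// Mirror of the stub's two paths: SmiUntag + vcvt_f64_s32 on one side,
// vldr from the HeapNumber payload on the other.
double NormalizeToDouble(intptr_t value, double heap_number_payload) {
  if (IsSmi(value)) return static_cast<double>(value >> 1);
  return heap_number_payload;
}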
@@ -527,7 +564,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -545,40 +582,60 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(value_reg, FieldMemOperand(scratch1, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+ }
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
@@ -624,24 +681,63 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ vmov(s0, scratch2);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register r0).
+ ASSERT(value_reg.is(r0));
+ __ Ret();
+ return;
+ }
+
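For an existing double field the stub loads the boxed storage once and overwrites only its payload, so the object's layout never changes. In miniature, with a plain struct standing in for HeapNumber:

// Plain-struct analog of the in-place update: the box is shared, only the
// payload changes (vstr d0, [storage + kValueOffset]).
struct HeapNumberBox { double value; };

void StoreDoubleField(HeapNumberBox* storage, double new_value) {
  storage->value = new_value;
}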
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -650,18 +746,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
@@ -1270,9 +1368,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1496,7 +1605,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@@ -2907,19 +3017,25 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(map_reg, ip);
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(receiver_maps->at(current)));
+ __ cmp(map_reg, ip);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
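The deprecated-map filtering happens while the IC is compiled, not at dispatch time: a deprecated map simply never gets a compare-and-jump emitted for it. A compact sketch of the emit loop's shape, with stand-in types for the map and handler list:

#include <vector>

struct StubMap { bool is_deprecated; };
struct HandlerEntry { const StubMap* map; int handler_id; };

// Count and emit only the live maps; the generated code then contains one
// cmp/jump pair per surviving entry, followed by the miss fall-through.
int EmitDispatch(const std::vector<HandlerEntry>& entries) {
  int handled = 0;
  for (const HandlerEntry& e : entries) {
    if (e.map->is_deprecated) continue;  // No check emitted at all.
    ++handled;
    // EmitCompareMapAndJump(e.map, e.handler_id);
  }
  return handled;  // Nonzero by assertion; picks MONOMORPHIC vs POLYMORPHIC.
}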
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
new file mode 100644
index 000000000..2b0c3dd85
--- /dev/null
+++ b/deps/v8/src/arraybuffer.js
@@ -0,0 +1,100 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var $ArrayBuffer = global.ArrayBuffer;
+
+// -------------------------------------------------------------------
+
+function ArrayBufferConstructor(byteLength) { // length = 1
+ if (%_IsConstructCall()) {
+ var l = TO_POSITIVE_INTEGER(byteLength);
+ %ArrayBufferInitialize(this, l);
+ } else {
+ return new $ArrayBuffer(byteLength);
+ }
+}
+
+function ArrayBufferGetByteLength() {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.byteLength', this]);
+ }
+ return %ArrayBufferGetByteLength(this);
+}
+
+// ES6 Draft 15.13.5.5.3
+function ArrayBufferSlice(start, end) {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.slice', this]);
+ }
+
+ var relativeStart = TO_INTEGER(start);
+ var first;
+ if (relativeStart < 0) {
+ first = MathMax(this.byteLength + relativeStart, 0);
+ } else {
+ first = MathMin(relativeStart, this.byteLength);
+ }
+ var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
+ var fin;
+ if (relativeEnd < 0) {
+ fin = MathMax(this.byteLength + relativeEnd, 0);
+ } else {
+ fin = MathMin(relativeEnd, this.byteLength);
+ }
+
+ var newLen = fin - first;
+ // TODO(dslomov): implement inheritance
+ var result = new $ArrayBuffer(newLen);
+
+ %ArrayBufferSliceImpl(this, result, first);
+ return result;
+}
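The clamping above follows the usual relative-index rules: a negative index counts back from byteLength, and both endpoints are clamped into [0, byteLength]. The same arithmetic as a standalone C++ helper, with a worked case in the trailing comment (illustrative only):

#include <algorithm>
#include <cstdint>

// Clamp a possibly negative slice index into [0, byte_length].
int64_t ClampSliceIndex(int64_t relative, int64_t byte_length) {
  if (relative < 0) return std::max<int64_t>(byte_length + relative, 0);
  return std::min(relative, byte_length);
}
// For a 10-byte buffer, slice(-4, -1) yields first = 6, fin = 9,
// so newLen = 3 bytes are copied.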
+
+function SetUpArrayBuffer() {
+ %CheckIsBootstrapping();
+
+ // Set up the ArrayBuffer constructor function.
+ %SetCode($ArrayBuffer, ArrayBufferConstructor);
+ %FunctionSetPrototype($ArrayBuffer, new $Object());
+
+ // Set up the constructor property on the ArrayBuffer prototype object.
+ %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
+
+ InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+
+ InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
+ "slice", ArrayBufferSlice
+ ));
+}
+
+SetUpArrayBuffer();
+
+
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index fff588af3..6b0c4b845 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -1203,6 +1203,13 @@ ExternalReference ExternalReference::old_data_space_allocation_limit_address(
}
+ExternalReference ExternalReference::
+ new_space_high_promotion_mode_active_address(Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
+}
+
+
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 32424cfb6..6abd5c55d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -757,6 +757,8 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
+ static ExternalReference new_space_high_promotion_mode_active_address(
+ Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 10ae7de45..9ffb00db0 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -277,6 +277,14 @@ class SmallMapList {
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
+ void AddMapIfMissing(Handle<Map> map, Zone* zone) {
+ map = Map::CurrentMapForDeprecated(map);
+ for (int i = 0; i < length(); ++i) {
+ if (at(i).is_identical_to(map)) return;
+ }
+ Add(map, zone);
+ }
+
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@@ -1324,10 +1332,9 @@ class ObjectLiteral: public MaterializedLiteral {
return constant_properties_;
}
ZoneList<Property*>* properties() const { return properties_; }
-
bool fast_elements() const { return fast_elements_; }
-
- bool has_function() { return has_function_; }
+ bool may_store_doubles() const { return may_store_doubles_; }
+ bool has_function() const { return has_function_; }
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1354,17 +1361,20 @@ class ObjectLiteral: public MaterializedLiteral {
bool is_simple,
bool fast_elements,
int depth,
+ bool may_store_doubles,
bool has_function)
: MaterializedLiteral(isolate, literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
+ may_store_doubles_(may_store_doubles),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
+ bool may_store_doubles_;
bool has_function_;
};
@@ -2849,10 +2859,11 @@ class AstNodeFactory BASE_EMBEDDED {
bool is_simple,
bool fast_elements,
int depth,
+ bool may_store_doubles,
bool has_function) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, has_function);
+ is_simple, fast_elements, depth, may_store_doubles, has_function);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.h b/deps/v8/src/atomicops_internals_x86_gcc.h
index 6e55b5018..e58d598fb 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.h
+++ b/deps/v8/src/atomicops_internals_x86_gcc.h
@@ -168,7 +168,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-#if defined(__x86_64__)
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 85bf96e4d..b0d3a5e50 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -201,7 +201,7 @@ class Genesis BASE_EMBEDDED {
ElementsKind elements_kind);
bool InstallNatives();
- void InstallTypedArray(const char* name);
+ Handle<JSFunction> InstallTypedArray(const char* name);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -979,28 +979,32 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
@@ -1009,7 +1013,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
- writable);
+ writable,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
@@ -1161,7 +1166,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_instance_descriptors(*descriptors);
{ // length
- FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
+ FieldDescriptor d(
+ *factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d, witness);
}
{ // callee
@@ -1270,11 +1276,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
-void Genesis::InstallTypedArray(const char* name) {
+Handle<JSFunction> Genesis::InstallTypedArray(const char* name) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
- InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
- JSTypedArray::kSize, isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ return InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSize, isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
}
@@ -1311,26 +1317,36 @@ void Genesis::InitializeExperimentalGlobal() {
}
}
+ if (FLAG_harmony_array_buffer) {
+ // -- A r r a y B u f f e r
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
+ native_context()->set_array_buffer_fun(*array_buffer_fun);
+ }
+
if (FLAG_harmony_typed_arrays) {
- { // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun =
- InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
- }
- {
- // -- T y p e d A r r a y s
- InstallTypedArray("__Int8Array");
- InstallTypedArray("__Uint8Array");
- InstallTypedArray("__Int16Array");
- InstallTypedArray("__Uint16Array");
- InstallTypedArray("__Int32Array");
- InstallTypedArray("__Uint32Array");
- InstallTypedArray("__Float32Array");
- InstallTypedArray("__Float64Array");
- }
+ // -- T y p e d A r r a y s
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
+ native_context()->set_double_array_fun(*double_fun);
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray");
+ native_context()->set_uint8c_array_fun(*uint8c_fun);
}
if (FLAG_harmony_generators) {
@@ -1371,6 +1387,40 @@ void Genesis::InitializeExperimentalGlobal() {
*generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
+
+ // Create a map for generator result objects.
+ ASSERT(object_map->inobject_properties() == 0);
+ STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
+ Handle<Map> generator_result_map = factory()->CopyMap(object_map,
+ JSGeneratorObject::kResultPropertyCount);
+ ASSERT(generator_result_map->inobject_properties() ==
+ JSGeneratorObject::kResultPropertyCount);
+
+ Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0,
+ JSGeneratorObject::kResultPropertyCount);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ generator_result_map->set_instance_descriptors(*descriptors);
+
+ Handle<String> value_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("value"));
+ FieldDescriptor value_descr(*value_string,
+ JSGeneratorObject::kResultValuePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&value_descr, witness);
+
+ Handle<String> done_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("done"));
+ FieldDescriptor done_descr(*done_string,
+ JSGeneratorObject::kResultDonePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&done_descr, witness);
+
+ generator_result_map->set_unused_property_fields(0);
+ ASSERT_EQ(JSGeneratorObject::kResultSize,
+ generator_result_map->instance_size());
+ native_context()->set_generator_result_map(*generator_result_map);
}
}
@@ -1924,14 +1974,16 @@ bool Genesis::InstallNatives() {
{
FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&input_field, witness);
}
@@ -1974,6 +2026,11 @@ bool Genesis::InstallExperimentalNatives() {
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_array_buffer &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native arraybuffer.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
if (FLAG_harmony_typed_arrays &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native typedarray.js") == 0) {
@@ -2352,14 +2409,15 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index),
+ ASSERT(!descs->GetDetails(i).representation().IsDouble());
+ Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
@@ -2386,9 +2444,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// Add to dictionary.
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
diff --git a/deps/v8/src/builtins-decls.h b/deps/v8/src/builtins-decls.h
new file mode 100644
index 000000000..beb5dd1e8
--- /dev/null
+++ b/deps/v8/src/builtins-decls.h
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BUILTINS_DECLS_H_
+#define V8_BUILTINS_DECLS_H_
+
+#include "arguments.h"
+
+namespace v8 {
+namespace internal {
+
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
+
+} } // namespace v8::internal
+
+#endif // V8_BUILTINS_DECLS_H_
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 571818030..661ee9437 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -125,23 +125,31 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ ASSERT(isolate == Isolate::Current()); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ return Builtin_impl##name(args, isolate); \
+ } \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate)
#endif
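Under the new macro every builtin gains a thin adapter that rebuilds the typed arguments wrapper from a raw (length, slot pointer) pair, so callers pass three scalar values instead of an Arguments object. Roughly what the release-mode expansion looks like for a hypothetical builtin named Example, with stand-in declarations so the shape compiles in isolation (the real name##ArgumentsType typedefs come from DEF_ARG_TYPE above):

struct Object;
struct Isolate;
struct MaybeObject;
struct ExampleArgumentsType {
  ExampleArgumentsType(int length, Object** slots)
      : length_(length), slots_(slots) {}
  int length_;
  Object** slots_;
};

static MaybeObject* Builtin_implExample(ExampleArgumentsType args,
                                        Isolate* isolate);

// The adapter callers actually invoke: raw (length, slots, isolate) in,
// typed wrapper rebuilt on the way through.
static MaybeObject* Builtin_Example(int args_length, Object** args_object,
                                    Isolate* isolate) {
  ExampleArgumentsType args(args_length, args_object);
  return Builtin_implExample(args, isolate);
}

static MaybeObject* Builtin_implExample(ExampleArgumentsType args,
                                        Isolate* isolate) {
  return 0;  // Body elided; the real builtin's work goes here.
}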
@@ -186,55 +194,58 @@ BUILTIN(EmptyFunction) {
}
-#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
- Arguments* name = reinterpret_cast<Arguments*>(args[0]);
-
-
RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
- CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
- ASSERT(args.length() == 2);
- Handle<Object> type_info = args.at<Object>(1);
+ // If we get 2 arguments then they are the stub parameters (constructor, type
+ // info). If we get 3, then the first one is a pointer to the arguments
+ // passed by the caller.
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 2;
+ ASSERT(no_caller_args || args.length() == 3);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ Handle<JSFunction> constructor = args.at<JSFunction>(parameters_start);
+ Handle<Object> type_info = args.at<Object>(parameters_start + 1);
- JSArray* array = NULL;
bool holey = false;
if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
int value = Smi::cast((*caller_args)[0])->value();
holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
}
+ JSArray* array;
MaybeObject* maybe_array;
- if (*type_info != isolate->heap()->undefined_value()) {
+ if (*type_info != isolate->heap()->undefined_value() &&
+ JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) {
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
- if (cell->value()->IsSmi()) {
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- cell->set_value(Smi::FromInt(to_kind));
- }
-
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
- if (mode == TRACK_ALLOCATION_SITE) {
- maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
- to_kind, type_info);
- } else {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(to_kind);
- }
- if (!maybe_array->To(&array)) return maybe_array;
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ cell->set_value(Smi::FromInt(to_kind));
}
- }
-
- ElementsKind kind = GetInitialFastElementsKind();
- if (holey) {
- kind = GetHoleyElementsKind(kind);
- }
- if (array == NULL) {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+ maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
+ *constructor, type_info);
+ if (!maybe_array->To(&array)) return maybe_array;
+ } else {
+ ElementsKind kind = constructor->initial_map()->elements_kind();
+ ASSERT(kind == GetInitialFastElementsKind());
+ maybe_array = isolate->heap()->AllocateJSObject(*constructor);
if (!maybe_array->To(&array)) return maybe_array;
+ // We might need to transition to holey elements.
+ if (holey) {
+ kind = GetHoleyElementsKind(kind);
+ maybe_array = array->TransitionElementsKind(kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
}
+ maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ if (maybe_array->IsFailure()) return maybe_array;
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
return array;
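The length check at the top of this function selects between two calling conventions for the stub-failure trampoline. In miniature, with plain integers standing in for the tagged slots (a sketch):

// Slot layouts seen by the trampoline:
//   2 slots: [constructor, type_info]
//   3 slots: [caller_args*, constructor, type_info]
// Return the slot index of the constructor for either convention.
int ParametersStart(int args_length) {
  bool no_caller_args = (args_length == 2);
  return no_caller_args ? 0 : 1;
}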
@@ -1500,6 +1511,11 @@ static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
+static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+ StoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@@ -1617,6 +1633,11 @@ static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
}
+static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateCompareNilICDebugBreak(masm);
+}
+
+
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
Debug::GenerateReturnDebugBreak(masm);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index ab7722832..6fc17c45c 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -124,6 +124,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
@@ -230,6 +232,8 @@ enum BuiltinExtraArguments {
DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
DEBUG_BREAK) \
+ V(CompareNilIC_DebugBreak, COMPARE_NIL_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
@@ -274,8 +278,6 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
-MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
-
class BuiltinFunctionTable;
class ObjectVisitor;
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index b6720795b..31431b71c 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -82,6 +82,24 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
+ class ArrayContextChecker {
+ public:
+ ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
+ HValue* array_function)
+ : checker_(builder) {
+ checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
+ array_function);
+ checker_.Then();
+ }
+
+ ~ArrayContextChecker() {
+ checker_.ElseDeopt();
+ checker_.End();
+ }
+ private:
+ IfBuilder checker_;
+ };
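The checker is meant to be used as a named RAII local: the constructor opens the "constructor equals the Array function" branch, and the destructor emits the deopting else-branch when the checker leaves scope, so everything built while it is alive sits inside the guarded branch. A self-contained sketch of the shape, with printf stand-ins for graph construction:

#include <cstdio>

// Minimal stand-ins for the graph builder's if-builder.
struct FakeIfBuilder {
  void Then() { printf("then-branch opened\n"); }
  void ElseDeopt() { printf("else-branch: deoptimize\n"); }
  void End() { printf("if closed\n"); }
};

struct FakeChecker {
  explicit FakeChecker(FakeIfBuilder* b) : b_(b) { b_->Then(); }
  ~FakeChecker() { b_->ElseDeopt(); b_->End(); }  // Runs at scope exit.
  FakeIfBuilder* b_;
};

int main() {
  FakeIfBuilder builder;
  FakeChecker checker(&builder);  // Named local: branch stays open below.
  // FakeChecker(&builder);       // A temporary would close the branch
                                  // immediately, guarding nothing.
  printf("guarded graph construction\n");
  return 0;
}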
+
private:
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
@@ -240,7 +258,8 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(), -1);
+ GetStubType(),
+ GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -290,8 +309,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
checker.Then();
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
- HValue* elements =
- AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+ HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.IfCompareMap(elements, factory->fixed_cow_array_map());
@@ -377,11 +395,12 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
checker.ElseDeopt();
@@ -409,6 +428,36 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
}
+template<>
+HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
+ Representation representation = casted_stub()->representation();
+ HInstruction* load = AddInstruction(DoBuildLoadNamedField(
+ GetParameter(0), casted_stub()->is_inobject(),
+ representation, casted_stub()->offset()));
+ return load;
+}
+
+
+Handle<Code> LoadFieldStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
+ Representation representation = casted_stub()->representation();
+ HInstruction* load = AddInstruction(DoBuildLoadNamedField(
+ GetParameter(0), casted_stub()->is_inobject(),
+ representation, casted_stub()->offset()));
+ return load;
+}
+
+
+Handle<Code> KeyedLoadFieldStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
@@ -452,8 +501,7 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
if_builder.Else();
- HInstruction* elements =
- AddInstruction(new(zone) HLoadElements(js_array, js_array));
+ HInstruction* elements = AddLoadElements(js_array);
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
@@ -470,12 +518,15 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
- map, true, JSArray::kMapOffset));
+ map, true,
+ Representation::Tagged(),
+ JSArray::kMapOffset));
return js_array;
}
@@ -491,6 +542,10 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
// -- Parameter 1 : type info cell
// -- Parameter 0 : constructor
// -----------------------------------
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker checker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
// Get the right map
// Should be a constant
JSArrayBuilder array_builder(
@@ -510,6 +565,10 @@ Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker checker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
// Smi check and range check on the input arg.
HValue* constant_one = graph()->GetConstant1();
HValue* constant_zero = graph()->GetConstant0();
@@ -563,6 +622,10 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker checker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
ElementsKind kind = casted_stub()->elements_kind();
HValue* length = GetArgumentsLength();
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index df9855d09..aa2c82172 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -431,7 +431,9 @@ CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags(
} else if (object->IsUndefined()) {
types = static_cast<CompareNilICStub::Types>(
types | CompareNilICStub::kCompareAgainstUndefined);
- } else if (object->IsUndetectableObject() || !object->IsHeapObject()) {
+ } else if (object->IsUndetectableObject() ||
+ object->IsOddball() ||
+ !object->IsHeapObject()) {
types = CompareNilICStub::kFullCompare;
} else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
types = CompareNilICStub::kFullCompare;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index ea895d669..646aee23e 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -87,7 +87,8 @@ namespace internal {
V(ArrayConstructor) \
V(ProfileEntryHook) \
/* IC Handler stubs */ \
- V(LoadField)
+ V(LoadField) \
+ V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -185,6 +186,12 @@ class CodeStub BASE_EMBEDDED {
virtual Code::ExtraICState GetExtraICState() {
return Code::kNoExtraICState;
}
+ virtual Code::StubType GetStubType() {
+ return Code::NORMAL;
+ }
+ virtual int GetStubFlags() {
+ return -1;
+ }
protected:
static bool CanUseFPRegisters();
@@ -192,9 +199,6 @@ class CodeStub BASE_EMBEDDED {
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
- virtual Code::StubType GetStubType() {
- return Code::NORMAL;
- }
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@@ -253,7 +257,6 @@ class PlatformCodeStub : public CodeStub {
virtual Handle<Code> GenerateCode();
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
- virtual int GetStubFlags() { return -1; }
protected:
// Generates the assembler code for the stub.
@@ -754,42 +757,108 @@ class StoreArrayLengthStub: public StoreICStub {
};
-class HandlerStub: public ICStub {
+class HICStub: public HydrogenCodeStub {
+ public:
+ virtual Code::Kind GetCodeKind() const { return kind(); }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+
+ protected:
+ HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { }
+ class KindBits: public BitField<Code::Kind, 0, 4> {};
+ virtual Code::Kind kind() const = 0;
+};
+
+
+class HandlerStub: public HICStub {
public:
- explicit HandlerStub(Code::Kind kind) : ICStub(kind) { }
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
virtual int GetStubFlags() { return kind(); }
+
+ protected:
+ HandlerStub() : HICStub() { }
};
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(Register reg, bool inobject, int index)
- : HandlerStub(Code::LOAD_IC),
- reg_(reg),
- inobject_(inobject),
- index_(index) { }
- virtual void Generate(MacroAssembler* masm);
+ LoadFieldStub(bool inobject, int index, Representation representation)
+ : HandlerStub() {
+ Initialize(Code::LOAD_IC, inobject, index, representation);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ Representation representation() {
+ if (unboxed_double()) return Representation::Double();
+ return Representation::Tagged();
+ }
+
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ bool is_inobject() {
+ return InobjectBits::decode(bit_field_);
+ }
+
+ int offset() {
+ int index = IndexBits::decode(bit_field_);
+ int offset = index * kPointerSize;
+ if (is_inobject()) return offset;
+ return FixedArray::kHeaderSize + offset;
+ }
+
+ bool unboxed_double() {
+ return UnboxedDoubleBits::decode(bit_field_);
+ }
- protected:
virtual Code::StubType GetStubType() { return Code::FIELD; }
+ protected:
+ LoadFieldStub() : HandlerStub() { }
+
+ void Initialize(Code::Kind kind,
+ bool inobject,
+ int index,
+ Representation representation) {
+ bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
+ bit_field_ = KindBits::encode(kind)
+ | InobjectBits::encode(inobject)
+ | IndexBits::encode(index)
+ | UnboxedDoubleBits::encode(unboxed_double);
+ }
+
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class RegisterBits: public BitField<int, 4, 6> {};
- class InobjectBits: public BitField<bool, 10, 1> {};
- class IndexBits: public BitField<int, 11, 11> {};
+ class InobjectBits: public BitField<bool, 4, 1> {};
+ class IndexBits: public BitField<int, 5, 11> {};
+ class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
- virtual int MinorKey() {
- return KindBits::encode(kind())
- | RegisterBits::encode(reg_.code())
- | InobjectBits::encode(inobject_)
- | IndexBits::encode(index_);
+ virtual int NotMissMinorKey() { return bit_field_; }
+
+ int bit_field_;
+};
+
+
+class KeyedLoadFieldStub: public LoadFieldStub {
+ public:
+ KeyedLoadFieldStub(bool inobject, int index, Representation representation)
+ : LoadFieldStub() {
+ Initialize(Code::KEYED_LOAD_IC, inobject, index, representation);
}
- Register reg_;
- bool inobject_;
- int index_;
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Handle<Code> GenerateCode();
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
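The minor key packs the stub's whole configuration into one integer via disjoint bit ranges, and offset() recovers the raw field offset from the index bits. A round trip through a simplified stand-in for V8's BitField template (same encode/decode idea, illustrative values):

#include <cassert>

// Simplified stand-in for V8's BitField template.
template<class T, int shift, int size>
struct MiniBitField {
  static const int kMask = ((1 << size) - 1) << shift;
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

typedef MiniBitField<int,  0, 4>  KindField;      // Mirrors the stub's layout.
typedef MiniBitField<bool, 4, 1>  InobjectField;
typedef MiniBitField<int,  5, 11> IndexField;
typedef MiniBitField<bool, 16, 1> UnboxedDoubleField;

int main() {
  int bits = KindField::encode(1) | InobjectField::encode(true) |
             IndexField::encode(7) | UnboxedDoubleField::encode(false);
  assert(IndexField::decode(bits) == 7);  // Raw offset = 7 * kPointerSize,
  assert(InobjectField::decode(bits));    // taken directly off the object.
  return 0;
}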
@@ -1596,7 +1665,8 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kPropertyCell = 0;
+ static const int kConstructor = 0;
+ static const int kPropertyCell = 1;
private:
int NotMissMinorKey() { return bit_field_; }
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index dce817129..b7ff92a7c 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -144,7 +144,8 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- Code::NORMAL, -1);
+ code_stub()->GetStubType(),
+ code_stub()->GetStubFlags());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -299,14 +300,14 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
//
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
- const int parameter_limit = -LUnallocated::kMinFixedIndex;
+ const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
info()->set_bailout_reason("too many parameters");
return AbortOptimization();
}
- const int locals_limit = LUnallocated::kMaxFixedIndex;
+ const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (!info()->osr_ast_id().IsNone() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason("too many parameters/locals");
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 0024e13d6..434b27414 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -124,6 +124,15 @@ enum BindingFlags {
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
+ V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
+ V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
+ V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
+ V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
+ V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
+ V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
+ V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
+ V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -171,6 +180,7 @@ enum BindingFlags {
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
+ V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -278,6 +288,15 @@ class Context: public FixedArray {
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
ARRAY_BUFFER_FUN_INDEX,
+ UINT8_ARRAY_FUN_INDEX,
+ INT8_ARRAY_FUN_INDEX,
+ UINT16_ARRAY_FUN_INDEX,
+ INT16_ARRAY_FUN_INDEX,
+ UINT32_ARRAY_FUN_INDEX,
+ INT32_ARRAY_FUN_INDEX,
+ FLOAT_ARRAY_FUN_INDEX,
+ DOUBLE_ARRAY_FUN_INDEX,
+ UINT8C_ARRAY_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
@@ -305,6 +324,7 @@ class Context: public FixedArray {
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
+ GENERATOR_RESULT_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index a20de43b7..e1d29d98e 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -77,7 +77,7 @@ void HandleDebugEvent(DebugEvent event,
// Print the event details.
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
+ Shell::DebugMessageDetails(isolate, Handle<String>::Cast(event_json));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
return;
@@ -114,7 +114,7 @@ void HandleDebugEvent(DebugEvent event,
// Convert the debugger command to a JSON debugger request.
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -146,7 +146,8 @@ void HandleDebugEvent(DebugEvent event,
Handle<String> response = Handle<String>::Cast(response_val);
// Convert the debugger response into text details and the running state.
- Handle<Object> response_details = Shell::DebugMessageDetails(response);
+ Handle<Object> response_details =
+ Shell::DebugMessageDetails(isolate, response);
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -281,7 +282,8 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
// Print the event details.
TryCatch try_catch;
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
+ Shell::DebugMessageDetails(isolate_,
+ Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
@@ -310,7 +312,7 @@ void RemoteDebugger::HandleKeyboardCommand(char* command) {
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 22ace174d..1889556e1 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -40,6 +40,11 @@
#include <string.h>
#include <sys/stat.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#ifdef V8_SHARED
#include <assert.h>
#endif // V8_SHARED
@@ -124,8 +129,8 @@ class PerIsolateData {
}
#define DEFINE_STRING_GETTER(name, value) \
- static Persistent<String> name##_string(Isolate* isolate) { \
- return Get(isolate)->name##_string_; \
+ static Handle<String> name##_string(Isolate* isolate) { \
+ return Handle<String>(*Get(isolate)->name##_string_); \
}
FOR_EACH_STRING(DEFINE_STRING_GETTER)
#undef DEFINE_STRING_GETTER
@@ -245,7 +250,7 @@ bool Shell::ExecuteString(Isolate* isolate,
} else {
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
- Local<Context>::New(data->realms_[data->realm_current_]);
+ Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@@ -272,7 +277,7 @@ bool Shell::ExecuteString(Isolate* isolate,
#if !defined(V8_SHARED)
} else {
v8::TryCatch try_catch;
- Context::Scope context_scope(utility_context_);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("Stringify"));
Handle<Value> argv[1] = { result };
@@ -421,7 +426,7 @@ Handle<Value> Shell::RealmEval(const Arguments& args) {
}
Handle<Script> script = Script::New(args[1]->ToString());
if (script.IsEmpty()) return Undefined(isolate);
- Local<Context> realm = Local<Context>::New(data->realms_[index]);
+ Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@@ -435,7 +440,7 @@ Handle<Value> Shell::RealmSharedGet(Local<String> property,
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
- return data->realm_shared_;
+ return Local<Value>::New(isolate, data->realm_shared_);
}
void Shell::RealmSharedSet(Local<String> property,
@@ -1057,14 +1062,14 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
- Persistent<Value> object,
- void* data) {
+ Persistent<Object>* object,
+ uint8_t* data) {
HandleScope scope(isolate);
- int32_t length = object->ToObject()->Get(
+ int32_t length = (*object)->Get(
PerIsolateData::byteLength_string(isolate))->Uint32Value();
isolate->AdjustAmountOfExternalAllocatedMemory(-length);
- delete[] static_cast<uint8_t*>(data);
- object.Dispose(isolate);
+ delete[] data;
+ object->Dispose(isolate);
}
@@ -1180,7 +1185,7 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
HandleScope handle_scope(isolate);
- Context::Scope context_scope(utility_context_);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
@@ -1191,8 +1196,10 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
- Context::Scope context_scope(utility_context_);
+Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
+ Handle<String> message) {
+ HandleScope handle_scope(isolate);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
@@ -1202,8 +1209,10 @@ Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
}
-Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
- Context::Scope context_scope(utility_context_);
+Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command) {
+ HandleScope handle_scope(isolate);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
@@ -1214,7 +1223,9 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
void Shell::DispatchDebugMessages() {
- v8::Context::Scope scope(Shell::evaluation_context_);
+ Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope handle_scope(isolate);
+ v8::Context::Scope scope(isolate, Shell::evaluation_context_);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
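Note on the change running through d8.cc above: it is mechanical but worth spelling out. Every scope and handle construction now names its isolate explicitly instead of relying on the process-global "current" isolate. A minimal before/after sketch, using only calls that appear in this patch:

    // Before: scope bound to the implicit current isolate.
    Context::Scope scope(evaluation_context_);

    // After: the isolate is passed explicitly, with a HandleScope opened
    // first so any Locals created inside it have somewhere to live.
    Isolate* isolate = v8::Isolate::GetCurrent();
    HandleScope handle_scope(isolate);
    Context::Scope scope(isolate, evaluation_context_);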
@@ -1327,7 +1338,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
// utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined(isolate));
evaluation_context_->SetSecurityToken(Undefined(isolate));
- Context::Scope utility_scope(utility_context_);
+ Context::Scope utility_scope(isolate, utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
@@ -1459,28 +1470,34 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(String::New("Realm"), realm_template);
// Bind the handlers for external arrays.
- PropertyAttribute attr =
- static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
- CreateArrayBufferTemplate(ArrayBuffer), attr);
- global_template->Set(String::New("Int8Array"),
- CreateArrayTemplate(Int8Array), attr);
- global_template->Set(String::New("Uint8Array"),
- CreateArrayTemplate(Uint8Array), attr);
- global_template->Set(String::New("Int16Array"),
- CreateArrayTemplate(Int16Array), attr);
- global_template->Set(String::New("Uint16Array"),
- CreateArrayTemplate(Uint16Array), attr);
- global_template->Set(String::New("Int32Array"),
- CreateArrayTemplate(Int32Array), attr);
- global_template->Set(String::New("Uint32Array"),
- CreateArrayTemplate(Uint32Array), attr);
- global_template->Set(String::New("Float32Array"),
- CreateArrayTemplate(Float32Array), attr);
- global_template->Set(String::New("Float64Array"),
- CreateArrayTemplate(Float64Array), attr);
- global_template->Set(String::New("Uint8ClampedArray"),
- CreateArrayTemplate(Uint8ClampedArray), attr);
+#ifndef V8_SHARED
+ if (!i::FLAG_harmony_typed_arrays) {
+#endif // V8_SHARED
+ PropertyAttribute attr =
+ static_cast<PropertyAttribute>(ReadOnly | DontDelete);
+ global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
+ CreateArrayBufferTemplate(ArrayBuffer), attr);
+ global_template->Set(String::New("Int8Array"),
+ CreateArrayTemplate(Int8Array), attr);
+ global_template->Set(String::New("Uint8Array"),
+ CreateArrayTemplate(Uint8Array), attr);
+ global_template->Set(String::New("Int16Array"),
+ CreateArrayTemplate(Int16Array), attr);
+ global_template->Set(String::New("Uint16Array"),
+ CreateArrayTemplate(Uint16Array), attr);
+ global_template->Set(String::New("Int32Array"),
+ CreateArrayTemplate(Int32Array), attr);
+ global_template->Set(String::New("Uint32Array"),
+ CreateArrayTemplate(Uint32Array), attr);
+ global_template->Set(String::New("Float32Array"),
+ CreateArrayTemplate(Float32Array), attr);
+ global_template->Set(String::New("Float64Array"),
+ CreateArrayTemplate(Float64Array), attr);
+ global_template->Set(String::New("Uint8ClampedArray"),
+ CreateArrayTemplate(Uint8ClampedArray), attr);
+#ifndef V8_SHARED
+ }
+#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
@@ -1522,7 +1539,8 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Locker lock(isolate);
HandleScope scope(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- utility_context_ = Context::New(NULL, global_template);
+ utility_context_.Reset(isolate,
+ Context::New(isolate, NULL, global_template));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the debugger agent if requested.
@@ -1535,14 +1553,15 @@ void Shell::InitializeDebugger(Isolate* isolate) {
}
-Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
+Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since context creation is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- Persistent<Context> context = Context::New(NULL, global_template);
+ HandleScope handle_scope(isolate);
+ Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -1560,7 +1579,7 @@ Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
- return context;
+ return handle_scope.Close(context);
}
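CreateEvaluationContext can now hand back a Local<Context> because HandleScope::Close() escapes exactly one handle into the enclosing scope before the local scope is torn down. A sketch of the pattern in isolation (function name hypothetical):

    Local<Context> MakeContext(Isolate* isolate,
                               Handle<ObjectTemplate> global_template) {
      HandleScope handle_scope(isolate);
      Local<Context> context = Context::New(isolate, NULL, global_template);
      // Close() copies this one handle into the caller's HandleScope;
      // every other Local created above dies with handle_scope.
      return handle_scope.Close(context);
    }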
@@ -1740,9 +1759,9 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
Locker locker(isolate);
- Context::Scope context_scope(evaluation_context_);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
HandleScope outer_scope(isolate);
+ Context::Scope context_scope(isolate, evaluation_context_);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
@@ -1791,7 +1810,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker(isolate_);
HandleScope outer_scope(isolate_);
- Persistent<Context> thread_context =
+ Local<Context> thread_context =
Shell::CreateEvaluationContext(isolate_);
Context::Scope context_scope(thread_context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
@@ -1815,7 +1834,6 @@ void ShellThread::Run() {
Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
}
- thread_context.Dispose(thread_context->GetIsolate());
ptr = next_line;
}
}
@@ -1892,15 +1910,16 @@ void SourceGroup::ExecuteInThread() {
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
- HandleScope scope(isolate);
- PerIsolateData data(isolate);
- Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
{
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Execute(isolate);
+ HandleScope scope(isolate);
+ PerIsolateData data(isolate);
+ Local<Context> context = Shell::CreateEvaluationContext(isolate);
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ Execute(isolate);
+ }
}
- context.Dispose(isolate);
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -2091,26 +2110,27 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#endif // V8_SHARED
{ // NOLINT
Locker lock(isolate);
- HandleScope scope(isolate);
- Persistent<Context> context = CreateEvaluationContext(isolate);
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_ = context;
+ {
+ HandleScope scope(isolate);
+ Local<Context> context = CreateEvaluationContext(isolate);
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_.Reset(isolate, context);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
- {
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- options.isolate_sources[0].Execute(isolate);
+ }
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ options.isolate_sources[0].Execute(isolate);
+ }
}
if (!options.last_run) {
- context.Dispose(isolate);
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -2155,7 +2175,7 @@ int Shell::Main(int argc, char* argv[]) {
{
Initialize(isolate);
#ifdef ENABLE_VTUNE_JIT_INTERFACE
- vTune::InitilizeVtuneForV8();
+ vTune::InitializeVtuneForV8();
#endif
PerIsolateData data(isolate);
InitializeDebugger(isolate);
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 4d9504f0d..c068dd9db 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -273,7 +273,7 @@ class Shell : public i::AllStatic {
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
- static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
+ static Local<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@@ -292,8 +292,10 @@ class Shell : public i::AllStatic {
static void MapCounters(const char* name);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
+ static Handle<Object> DebugMessageDetails(Isolate* isolate,
+ Handle<String> message);
+ static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
@@ -414,8 +416,8 @@ class Shell : public i::AllStatic {
ExternalArrayType type,
int32_t element_size);
static void ExternalArrayWeakCallback(Isolate* isolate,
- Persistent<Value> object,
- void* data);
+ Persistent<Object>* object,
+ uint8_t* data);
};
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index efba8e585..02ec1248f 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -76,12 +76,12 @@ Debug::~Debug() {
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Length() + 1);
+ ScopedVector<char> data(s->Utf8Length() + 1);
if (data.start() == NULL) {
V8::FatalProcessOutOfMemory("PrintLn");
return;
}
- s->WriteAscii(data.start());
+ s->WriteUtf8(data.start());
PrintF("%s\n", data.start());
}
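The buffer sizing fix above matters for non-ASCII output: String::Length() counts UTF-16 code units, while WriteUtf8() emits up to three bytes per BMP code unit, so sizing the buffer with Length() can under-allocate. For example, the one-character string "é" (U+00E9) has Length() == 1 but Utf8Length() == 2.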
@@ -1644,6 +1644,9 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
case Code::KEYED_STORE_IC:
return isolate->builtins()->KeyedStoreIC_DebugBreak();
+ case Code::COMPARE_NIL_IC:
+ return isolate->builtins()->CompareNilIC_DebugBreak();
+
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 459073fe0..ccdc0c05e 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -418,6 +418,7 @@ class Debug {
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+ static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index f36006c11..fe71a225b 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -660,6 +660,14 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
}
+Handle<HeapNumber> Factory::NewHeapNumber(double value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
+}
+
+
Handle<JSObject> Factory::NewNeanderObject() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -1056,6 +1064,58 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
}
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
+ JSFunction* typed_array_fun;
+ Context* native_context = isolate()->context()->native_context();
+ switch (type) {
+ case kExternalUnsignedByteArray:
+ typed_array_fun = native_context->uint8_array_fun();
+ break;
+
+ case kExternalByteArray:
+ typed_array_fun = native_context->int8_array_fun();
+ break;
+
+ case kExternalUnsignedShortArray:
+ typed_array_fun = native_context->uint16_array_fun();
+ break;
+
+ case kExternalShortArray:
+ typed_array_fun = native_context->int16_array_fun();
+ break;
+
+ case kExternalUnsignedIntArray:
+ typed_array_fun = native_context->uint32_array_fun();
+ break;
+
+ case kExternalIntArray:
+ typed_array_fun = native_context->int32_array_fun();
+ break;
+
+ case kExternalFloatArray:
+ typed_array_fun = native_context->float_array_fun();
+ break;
+
+ case kExternalDoubleArray:
+ typed_array_fun = native_context->double_array_fun();
+ break;
+
+ case kExternalPixelArray:
+ typed_array_fun = native_context->uint8c_array_fun();
+ break;
+
+ default:
+ UNREACHABLE();
+ return Handle<JSTypedArray>();
+ }
+
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(typed_array_fun),
+ JSTypedArray);
+}
+
+
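A hedged usage sketch for the new factory method; the caller is still responsible for wiring up the backing buffer, byte offset and length (the runtime does that elsewhere, not shown here):

    // Allocates a JSTypedArray whose map comes from the matching
    // native-context constructor, e.g. kExternalIntArray -> Int32Array.
    Handle<JSTypedArray> array =
        isolate->factory()->NewJSTypedArray(kExternalIntArray);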
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index caac78df4..5e8970835 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -267,6 +267,9 @@ class Factory {
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewHeapNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
Handle<JSObject> NewNeanderObject();
@@ -315,6 +318,8 @@ class Factory {
Handle<JSArrayBuffer> NewJSArrayBuffer();
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
+
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 0a6bf6762..d5d58a7fa 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -166,6 +166,9 @@ DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony_typed_arrays, false,
"enable harmony typed arrays")
+DEFINE_bool(harmony_array_buffer, false,
+ "enable harmony array buffer")
+DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
@@ -177,7 +180,7 @@ DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
-DEFINE_implication(harmony, harmony_typed_arrays)
+// TODO(dslomov): add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -190,6 +193,9 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
+DEFINE_bool(track_fields, false, "track fields with only smi values")
+DEFINE_bool(track_double_fields, false, "track fields with double values")
+DEFINE_implication(track_double_fields, track_fields)
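DEFINE_implication(a, b) means that turning on flag a also turns on flag b, so passing --harmony_typed_arrays to d8 now enables --harmony_array_buffer as well, and --track_double_fields likewise enables --track_fields.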
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -225,6 +231,8 @@ DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
+DEFINE_bool(trace_migration, false, "trace object migration")
+DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 83b37a5fe..1228ccf18 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -98,6 +98,12 @@ inline StackHandler::Kind StackHandler::kind() const {
}
+inline unsigned StackHandler::index() const {
+ const int offset = StackHandlerConstants::kStateOffset;
+ return IndexField::decode(Memory::unsigned_at(address() + offset));
+}
+
+
inline Object** StackHandler::context_address() const {
const int offset = StackHandlerConstants::kContextOffset;
return reinterpret_cast<Object**>(address() + offset);
@@ -213,6 +219,34 @@ Object* JavaScriptFrame::GetParameter(int index) const {
}
+inline Address JavaScriptFrame::GetOperandSlot(int index) const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+ ASSERT(IsAddressAligned(base, kPointerSize));
+ ASSERT_EQ(type(), JAVA_SCRIPT);
+ ASSERT_LT(index, ComputeOperandsCount());
+ ASSERT_LE(0, index);
+ // Operand stack grows down.
+ return base - index * kPointerSize;
+}
+
+
+inline Object* JavaScriptFrame::GetOperand(int index) const {
+ return Memory::Object_at(GetOperandSlot(index));
+}
+
+
+inline int JavaScriptFrame::ComputeOperandsCount() const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+  // Base points to the low address of the first operand, and the stack
+  // grows down, so add kPointerSize to get the actual stack size.
+ intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
+ ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
+ ASSERT(type() == JAVA_SCRIPT);
+ ASSERT(stack_size_in_bytes >= 0);
+ return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
+}
+
+
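Worked through for a concrete case (assuming a 64-bit target, so kPointerSize == 8): with two operands pushed, sp == base - 8, and ComputeOperandsCount() returns ((base + 8) - (base - 8)) >> 3 == 2. GetOperandSlot(0) == base is the first value pushed, and GetOperandSlot(1) == sp is the top of the operand stack.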
inline Object* JavaScriptFrame::receiver() const {
return GetParameter(-1);
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index aaf8c79e2..152cd30d5 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -840,6 +840,72 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
}
+void JavaScriptFrame::SaveOperandStack(FixedArray* store,
+ int* stack_handler_index) const {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+
+ // Visit the stack in LIFO order, saving operands and stack handlers into the
+ // array. The saved stack handlers store a link to the next stack handler,
+ // which will allow RestoreOperandStack to rewind the handlers.
+ StackHandlerIterator it(this, top_handler());
+ int i = operands_count - 1;
+ *stack_handler_index = -1;
+ for (; !it.done(); it.Advance()) {
+ StackHandler* handler = it.handler();
+ // Save operands pushed after the handler was pushed.
+ for (; GetOperandSlot(i) < handler->address(); i--) {
+ store->set(i, GetOperand(i));
+ }
+ ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount);
+ ASSERT_EQ(handler->address(), GetOperandSlot(i));
+ int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
+ handler->Unwind(isolate(), store, next_stack_handler_index,
+ *stack_handler_index);
+ *stack_handler_index = next_stack_handler_index;
+ i -= StackHandlerConstants::kSlotCount;
+ }
+
+ // Save any remaining operands.
+ for (; i >= 0; i--) {
+ store->set(i, GetOperand(i));
+ }
+}
+
+
+void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
+ int stack_handler_index) {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+ int i = 0;
+ while (i <= stack_handler_index) {
+ if (i < stack_handler_index) {
+ // An operand.
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ i++;
+ } else {
+ // A stack handler.
+ ASSERT_EQ(i, stack_handler_index);
+ // The FixedArray store grows up. The stack grows down. So the operand
+ // slot for i actually points to the bottom of the top word in the
+ // handler. The base of the StackHandler* is the address of the bottom
+ // word, which will be the last slot that is in the handler.
+ int handler_slot_index = i + StackHandlerConstants::kSlotCount - 1;
+ StackHandler *handler =
+ StackHandler::FromAddress(GetOperandSlot(handler_slot_index));
+ stack_handler_index = handler->Rewind(isolate(), store, i, fp());
+ i += StackHandlerConstants::kSlotCount;
+ }
+ }
+
+ for (; i < operands_count; i++) {
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ }
+}
+
+
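A concrete picture of the save format (assuming kSlotCount == 5, a frame with five operands, and one handler pushed after the first three, so store->length() == 10): SaveOperandStack fills the FixedArray as

    index 0..2   operands pushed before the handler
    index 3..7   the unwound handler (slot 3, its "next" field, holds -1)
    index 8..9   operands pushed after the handler

and sets *stack_handler_index to 3. RestoreOperandStack walks the same array upward, switching to StackHandler::Rewind() when it reaches index 3 and resuming with plain operand stores afterwards.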
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
@@ -1438,6 +1504,60 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
+
+void StackHandler::Unwind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ int previous_handler_offset) const {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + 5);
+ // Unwinding a stack handler into an array chains it in the opposite
+ // direction, re-using the "next" slot as a "previous" link, so that stack
+ // handlers can be later re-wound in the correct order. Decode the "state"
+ // slot into "index" and "kind" and store them separately, using the fp slot.
+ array->set(offset, Smi::FromInt(previous_handler_offset)); // next
+ array->set(offset + 1, *code_address()); // code
+ array->set(offset + 2, Smi::FromInt(static_cast<int>(index()))); // state
+ array->set(offset + 3, *context_address()); // context
+ array->set(offset + 4, Smi::FromInt(static_cast<int>(kind()))); // fp
+
+ *isolate->handler_address() = next()->address();
+}
+
+
+int StackHandler::Rewind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ Address fp) {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + 5);
+ Smi* prev_handler_offset = Smi::cast(array->get(offset));
+ Code* code = Code::cast(array->get(offset + 1));
+ Smi* smi_index = Smi::cast(array->get(offset + 2));
+ Object* context = array->get(offset + 3);
+ Smi* smi_kind = Smi::cast(array->get(offset + 4));
+
+ unsigned state = KindField::encode(static_cast<Kind>(smi_kind->value())) |
+ IndexField::encode(static_cast<unsigned>(smi_index->value()));
+
+ Memory::Address_at(address() + StackHandlerConstants::kNextOffset) =
+ *isolate->handler_address();
+ Memory::Object_at(address() + StackHandlerConstants::kCodeOffset) = code;
+ Memory::uintptr_at(address() + StackHandlerConstants::kStateOffset) = state;
+ Memory::Object_at(address() + StackHandlerConstants::kContextOffset) =
+ context;
+ Memory::Address_at(address() + StackHandlerConstants::kFPOffset) = fp;
+
+ *isolate->handler_address() = address();
+
+ return prev_handler_offset->value();
+}
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
return CompilerIntrinsics::CountSetBits(reglist);
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 11e8d2878..3c44f5e5b 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -93,6 +93,7 @@ class StackHandlerConstants : public AllStatic {
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
+ static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@@ -131,9 +132,15 @@ class StackHandler BASE_EMBEDDED {
inline bool is_catch() const;
inline bool is_finally() const;
+ // Generator support to preserve stack handlers.
+ void Unwind(Isolate* isolate, FixedArray* array, int offset,
+ int previous_handler_offset) const;
+ int Rewind(Isolate* isolate, FixedArray* array, int offset, Address fp);
+
private:
// Accessors.
inline Kind kind() const;
+ inline unsigned index() const;
inline Object** context_address() const;
inline Object** code_address() const;
@@ -536,6 +543,15 @@ class JavaScriptFrame: public StandardFrame {
return GetNumberOfIncomingArguments();
}
+ // Access the operand stack.
+ inline Address GetOperandSlot(int index) const;
+ inline Object* GetOperand(int index) const;
+ inline int ComputeOperandsCount() const;
+
+ // Generator support to preserve operand stack and stack handlers.
+ void SaveOperandStack(FixedArray* store, int* stack_handler_index) const;
+ void RestoreOperandStack(FixedArray* store, int stack_handler_index);
+
// Debugger access.
void SetParameterValue(int index, Object* value) const;
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 3734ae526..32242b297 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -410,6 +410,11 @@ class FullCodeGenerator: public AstVisitor {
// this has to be a separate pass _before_ populating or executing any module.
void AllocateModules(ZoneList<Declaration*>* declarations);
+ // Generator code to return a fresh iterator result object. The "value"
+ // property is set to a value popped from the stack, and "done" is set
+ // according to the argument.
+ void EmitReturnIteratorResult(bool done);
+
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
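At the language level each generator resume must produce a fresh object of the shape { value, done }; this helper allocates that object inline in generated code, taking "value" from the top of the operand stack and baking "done" in at compile time (false for a yield, true for a final return).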
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 7ee89d7b2..29432bb5b 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "api.h"
@@ -232,7 +235,7 @@ class GlobalHandles::Node {
void MakeWeak(GlobalHandles* global_handles,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(state() != FREE);
set_state(WEAK);
@@ -264,7 +267,7 @@ class GlobalHandles::Node {
set_state(NEAR_DEATH);
set_parameter(NULL);
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
+ v8::Persistent<v8::Value> object = ToApi<v8::Value>(handle());
{
// Check that we are not passing a finalized external string to
// the callback.
@@ -276,9 +279,11 @@ class GlobalHandles::Node {
VMState<EXTERNAL> state(isolate);
if (near_death_callback_ != NULL) {
if (IsWeakCallback::decode(flags_)) {
- WeakReferenceCallback callback =
- reinterpret_cast<WeakReferenceCallback>(near_death_callback_);
- callback(object, par);
+ RevivableCallback callback =
+ reinterpret_cast<RevivableCallback>(near_death_callback_);
+ callback(reinterpret_cast<v8::Isolate*>(isolate),
+ &object,
+ par);
} else {
near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
object,
@@ -490,9 +495,9 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
- ASSERT(near_death_callback != NULL);
+ ASSERT((weak_reference_callback == NULL) != (near_death_callback == NULL));
Node::FromLocation(location)->MakeWeak(this,
parameter,
weak_reference_callback,
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 81e14765c..f502dfa24 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -130,6 +130,8 @@ class GlobalHandles {
// Destroy a global handle.
void Destroy(Object** location);
+ typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
+
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
// handles point to an object, the handles are cleared and the callback
@@ -138,7 +140,7 @@ class GlobalHandles {
// reason is that Smi::FromInt(0) does not change during garbage collection.
void MakeWeak(Object** location,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback);
void RecordStats(HeapStats* stats);
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 5a3e9ed27..4f5e9fe72 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -53,8 +53,9 @@ Handle<T>::Handle(T* obj, Isolate* isolate) {
template <typename T>
inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ if (location_ == other.location_) return true;
+ if (location_ == NULL || other.location_ == NULL) return false;
// Dereferencing deferred handles to check object equality is safe.
SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
return *location_ == *other.location_;
@@ -63,24 +64,22 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
template <typename T>
inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL);
- ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+ ASSERT(location_ != NULL && !(*location_)->IsFailure());
SLOW_ASSERT(IsDereferenceAllowed(false));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- SLOW_ASSERT(IsDereferenceAllowed(false));
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ SLOW_ASSERT(location_ == NULL || IsDereferenceAllowed(false));
return location_;
}
#ifdef DEBUG
template <typename T>
bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
- if (location_ == NULL) return true;
+ ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index f93742618..b71978baf 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -650,6 +650,10 @@ inline bool Heap::allow_allocation(bool new_state) {
return old;
}
+inline void Heap::set_allow_allocation(bool allocation_allowed) {
+ allocation_allowed_ = allocation_allowed;
+}
+
#endif
@@ -864,33 +868,41 @@ DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
-AssertNoAllocation::AssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(false);
+bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
+ bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ bool last_state = isolate->heap()->IsAllocationAllowed();
+ if (active) {
+ // TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
+ // same way if running on the optimizer thread.
+ isolate->heap()->set_allow_allocation(allow_allocation);
}
+ return last_state;
}
-AssertNoAllocation::~AssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
+void ExitAllocationScope(Isolate* isolate, bool last_state) {
+ bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active) {
+ isolate->heap()->set_allow_allocation(last_state);
+ }
}
-DisableAssertNoAllocation::DisableAssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(true);
- }
+AssertNoAllocation::AssertNoAllocation()
+ : last_state_(EnterAllocationScope(ISOLATE, false)) {
}
+AssertNoAllocation::~AssertNoAllocation() {
+ ExitAllocationScope(ISOLATE, last_state_);
+}
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
+DisableAssertNoAllocation::DisableAssertNoAllocation()
+ : last_state_(EnterAllocationScope(ISOLATE, true)) {
}
+DisableAssertNoAllocation::~DisableAssertNoAllocation() {
+ ExitAllocationScope(ISOLATE, last_state_);
+}
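Both guards now funnel through a single EnterAllocationScope/ExitAllocationScope pair that saves and restores the previous state, so the scopes nest correctly. A sketch of the intended behavior:

    {
      AssertNoAllocation no_gc;            // allocation disallowed
      {
        DisableAssertNoAllocation allow;   // temporarily re-allowed
      }                                    // back to disallowed
    }                                      // back to the initial state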
#else
AssertNoAllocation::AssertNoAllocation() { }
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 3d890f720..f488304f4 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -1309,8 +1309,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetDetails(i).descriptor_index() > real_size) continue;
+ for (int i = 0; i < real_size; i++) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
@@ -1332,7 +1331,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
js_obj->GetInObjectPropertyOffset(index));
}
} else {
- Object* value = js_obj->FastPropertyAt(index);
+ Object* value = js_obj->RawFastPropertyAt(index);
if (k != heap_->hidden_string()) {
SetPropertyReference(js_obj, entry, k, value);
} else {
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index fb2f9d9e4..6139080bf 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -3176,7 +3176,8 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache) {
+ bool check_number_string_cache,
+ PretenureFlag pretenure) {
isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
@@ -3197,7 +3198,8 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
+ MaybeObject* maybe_js_string =
+ AllocateStringFromOneByte(CStrVector(str), pretenure);
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -4156,7 +4158,9 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsInternalizedString());
- FieldDescriptor field(name, i, NONE, i + 1);
+ // TODO(verwaest): Since we cannot update the boilerplate's map yet,
+ // initialize to the worst case.
+ FieldDescriptor field(name, i, NONE, Representation::Tagged());
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
@@ -4336,8 +4340,7 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
- isolate(), to_kind);
+ MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
// Possibly alter the mode, since we found an updated elements kind
// in the type info cell.
@@ -4585,12 +4588,10 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
Object* value = descs->GetCallbacksObject(i);
MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 7722079e5..add42c01d 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -1476,6 +1476,7 @@ class Heap {
#ifdef DEBUG
bool IsAllocationAllowed() { return allocation_allowed_; }
+ inline void set_allow_allocation(bool allocation_allowed);
inline bool allow_allocation(bool enable);
bool disallow_allocation_failure() {
@@ -1530,6 +1531,14 @@ class Heap {
return new_space_high_promotion_mode_active_;
}
+ inline PretenureFlag GetPretenureMode() {
+ return new_space_high_promotion_mode_active_ ? TENURED : NOT_TENURED;
+ }
+
+ inline Address* NewSpaceHighPromotionModeActiveAddress() {
+ return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_);
+ }
+
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
@@ -1608,7 +1617,8 @@ class Heap {
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true);
+ Object* number, bool check_number_string_cache = true,
+ PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
@@ -1975,7 +1985,8 @@ class Heap {
// Indicates that the new space should be kept small due to high promotion
// rates caused by the mutator allocating a lot of long-lived objects.
- bool new_space_high_promotion_mode_active_;
+ // TODO(hpayer): change to bool if no longer accessed from generated code
+ intptr_t new_space_high_promotion_mode_active_;
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
@@ -2691,6 +2702,13 @@ class DescriptorLookupCache {
// { AssertNoAllocation nogc;
// ...
// }
+
+#ifdef DEBUG
+inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
+inline void ExitAllocationScope(Isolate* isolate, bool last_state);
+#endif
+
+
class AssertNoAllocation {
public:
inline AssertNoAllocation();
@@ -2698,8 +2716,7 @@ class AssertNoAllocation {
#ifdef DEBUG
private:
- bool old_state_;
- bool active_;
+ bool last_state_;
#endif
};
@@ -2711,8 +2728,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
private:
- bool old_state_;
- bool active_;
+ bool last_state_;
#endif
};
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 5f0cd9d31..8f8c59ea8 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -54,20 +54,6 @@ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
int HValue::LoopWeight() const {
const int w = FLAG_loop_weight;
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
@@ -1615,15 +1601,6 @@ void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
}
-void HLoadElements::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
-}
-
-
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
@@ -1972,6 +1949,10 @@ void HPhi::DeleteFromGraph() {
void HPhi::InitRealUses(int phi_id) {
// Initialize real uses.
phi_id_ = phi_id;
+ // Compute a conservative approximation of truncating uses before inferring
+ // representations. The proper, exact computation will be done later, when
+ // inserting representation changes.
+ SetFlag(kTruncatingToInt32);
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
@@ -1981,6 +1962,9 @@ void HPhi::InitRealUses(int phi_id) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
+ if (!value->IsSimulate() && !value->CheckFlag(kTruncatingToInt32)) {
+ ClearFlag(kTruncatingToInt32);
+ }
}
}
}
@@ -2076,7 +2060,12 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
+ is_not_in_new_space_(true),
boolean_value_(handle->BooleanValue()) {
+ if (handle_->IsHeapObject()) {
+ Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+ is_not_in_new_space_ = !heap->InNewSpace(*handle);
+ }
if (handle_->IsNumber()) {
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
@@ -2105,12 +2094,14 @@ HConstant::HConstant(Handle<Object> handle,
Representation r,
HType type,
bool is_internalize_string,
+ bool is_not_in_new_space,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(boolean_value),
type_from_value_(type) {
ASSERT(!handle.is_null());
@@ -2122,12 +2113,14 @@ HConstant::HConstant(Handle<Object> handle,
HConstant::HConstant(int32_t integer_value,
Representation r,
+ bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(true),
has_double_value_(true),
is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
@@ -2137,12 +2130,14 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
+ bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
@@ -2162,26 +2157,35 @@ void HConstant::Initialize(Representation r) {
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
- if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_);
- if (has_double_value_) return new(zone) HConstant(double_value_, r, handle_);
+ if (has_int32_value_) {
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
+ }
+ if (has_double_value_) {
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
+ }
ASSERT(!handle_.is_null());
return new(zone) HConstant(handle_,
unique_id_,
r,
type_from_value_,
is_internalized_string_,
+ is_not_in_new_space_,
boolean_value_);
}
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (has_int32_value_) {
- return new(zone) HConstant(
- int32_value_, Representation::Integer32(), handle_);
+ return new(zone) HConstant(int32_value_,
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
}
if (has_double_value_) {
- return new(zone) HConstant(
- DoubleToInt32(double_value_), Representation::Integer32(), handle_);
+ return new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
}
return NULL;
}
@@ -2517,6 +2521,8 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
+ // Deprecated maps are updated to the current map in the type oracle.
+ ASSERT(!map->is_deprecated());
LookupResult lookup(map->GetIsolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
@@ -2528,6 +2534,12 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
+ if (FLAG_track_double_fields &&
+ lookup.representation().IsDouble()) {
+ // Since the value needs to be boxed, use a generic handler for
+ // loading doubles.
+ continue;
+ }
types_.Add(types->at(i), zone);
break;
}
@@ -3501,14 +3513,7 @@ void HPhi::SimplifyConstantInputs() {
void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
- // If there are non-Phi uses, and all of them have observed the same
- // representation, than that's what this Phi is going to use.
- Representation new_rep = RepresentationObservedByAllNonPhiUses();
- if (!new_rep.IsNone()) {
- UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
- return;
- }
- new_rep = RepresentationFromInputs();
+ Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
@@ -3517,22 +3522,6 @@ void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
}
-Representation HPhi::RepresentationObservedByAllNonPhiUses() {
- int non_phi_use_count = 0;
- for (int i = Representation::kInteger32;
- i < Representation::kNumRepresentations; ++i) {
- non_phi_use_count += non_phi_uses_[i];
- }
- if (non_phi_use_count <= 1) return Representation::None();
- for (int i = 0; i < Representation::kNumRepresentations; ++i) {
- if (non_phi_uses_[i] == non_phi_use_count) {
- return Representation::FromKind(static_cast<Representation::Kind>(i));
- }
- }
- return Representation::None();
-}
-
-
Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index aa89f7146..3ea99d40a 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -135,7 +135,6 @@ class LChunkBuilder;
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -207,6 +206,7 @@ class LChunkBuilder;
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
+ V(DoubleFields) \
V(ElementsKind) \
V(ElementsPointer) \
V(ArrayElements) \
@@ -304,58 +304,6 @@ class Range: public ZoneObject {
};
-class Representation {
- public:
- enum Kind {
- kNone,
- kInteger32,
- kDouble,
- kTagged,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- static Representation FromKind(Kind kind) { return Representation(kind); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- bool is_more_general_than(const Representation& other) {
- ASSERT(kind_ != kExternal);
- ASSERT(other.kind_ != kExternal);
- return kind_ > other.kind_;
- }
-
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- // Make sure kind fits in int8.
- STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
-
- int8_t kind_;
-};
-
-
class UniqueValueId {
public:
UniqueValueId() : raw_address_(NULL) { }
@@ -2405,15 +2353,20 @@ class HCallNewArray: public HCallNew {
Handle<JSGlobalPropertyCell> type_cell)
: HCallNew(context, constructor, argument_count),
type_cell_(type_cell) {
+ elements_kind_ = static_cast<ElementsKind>(
+ Smi::cast(type_cell->value())->value());
}
Handle<JSGlobalPropertyCell> property_cell() const {
return type_cell_;
}
+ ElementsKind elements_kind() const { return elements_kind_; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
+ ElementsKind elements_kind_;
Handle<JSGlobalPropertyCell> type_cell_;
};
@@ -2637,39 +2590,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
};
-class HLoadElements: public HTemplateInstruction<2> {
- public:
- HLoadElements(HValue* value, HValue* typecheck) {
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsPointer);
- }
-
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
class HLoadExternalArrayPointer: public HUnaryOperation {
public:
explicit HLoadExternalArrayPointer(HValue* value)
@@ -3065,7 +2985,6 @@ class HPhi: public HValue {
virtual Range* InferRange(Zone* zone);
virtual void InferRepresentation(HInferRepresentation* h_infer);
- Representation RepresentationObservedByAllNonPhiUses();
Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
@@ -3238,19 +3157,24 @@ class HConstant: public HTemplateInstruction<0> {
HConstant(Handle<Object> handle, Representation r);
HConstant(int32_t value,
Representation r,
+ bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(double value,
Representation r,
+ bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalized_string,
+ bool is_not_in_new_space,
bool boolean_value);
Handle<Object> handle() {
if (handle_.is_null()) {
+      // Default arguments to is_not_in_new_space depend on this heap number
+      // being tenured, so it's guaranteed not to be located in new space.
handle_ = FACTORY->NewNumber(double_value_, TENURED);
}
ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
@@ -3265,6 +3189,10 @@ class HConstant: public HTemplateInstruction<0> {
std::isnan(double_value_));
}
+ bool NotInNewSpace() const {
+ return is_not_in_new_space_;
+ }
+
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
@@ -3411,6 +3339,7 @@ class HConstant: public HTemplateInstruction<0> {
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
+ bool is_not_in_new_space_ : 1;
bool boolean_value_ : 1;
int32_t int32_value_;
double double_value_;
@@ -3434,16 +3363,27 @@ class HBinaryOperation: public HTemplateInstruction<3> {
HValue* left() { return OperandAt(1); }
HValue* right() { return OperandAt(2); }
- // TODO(kasperl): Move these helpers to the IA-32 Lithium
- // instruction sequence builder.
- HValue* LeastConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return right();
- return left();
+ // True if switching left and right operands likely generates better code.
+ bool AreOperandsBetterSwitched() {
+ if (!IsCommutative()) return false;
+
+  // Constant operands are better off on the right; they can be inlined in
+ // many situations on most platforms.
+ if (left()->IsConstant()) return true;
+ if (right()->IsConstant()) return false;
+
+ // Otherwise, if there is only one use of the right operand, it would be
+ // better off on the left for platforms that only have 2-arg arithmetic
+  // ops (e.g. ia32, x64) that clobber the left operand.
+ return (right()->UseCount() == 1);
}
- HValue* MostConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return left();
- return right();
+ HValue* BetterLeftOperand() {
+ return AreOperandsBetterSwitched() ? right() : left();
+ }
+
+ HValue* BetterRightOperand() {
+ return AreOperandsBetterSwitched() ? left() : right();
}
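Two concrete cases of the heuristic: for x + 1 the constant ends up as BetterRightOperand() so it can be encoded as an immediate, and for a + b where b has exactly one remaining use, b becomes BetterLeftOperand() so the two-address instruction clobbers a value that is dead afterwards anyway.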
void set_observed_input_representation(int index, Representation rep) {
@@ -5265,29 +5205,45 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
class HLoadNamedField: public HTemplateInstruction<2> {
public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset,
- HValue* typecheck = NULL)
+ HLoadNamedField(HValue* object, bool is_in_object,
+ Representation field_representation,
+ int offset, HValue* typecheck = NULL)
: is_in_object_(is_in_object),
+ field_representation_(field_representation),
offset_(offset) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
- set_representation(Representation::Tagged());
+ if (FLAG_track_fields && field_representation.IsSmi()) {
+ set_type(HType::Smi());
+ set_representation(Representation::Tagged());
+ } else if (FLAG_track_double_fields && field_representation.IsDouble()) {
+ set_representation(field_representation);
+ } else {
+ set_representation(Representation::Tagged());
+ }
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- if (is_in_object) {
+ if (FLAG_track_double_fields && representation().IsDouble()) {
+ ASSERT(is_in_object);
+ ASSERT(offset == HeapNumber::kValueOffset);
+ SetGVNFlag(kDependsOnDoubleFields);
+ } else if (is_in_object) {
SetGVNFlag(kDependsOnInobjectFields);
+ SetGVNFlag(kDependsOnMaps);
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
+ SetGVNFlag(kDependsOnMaps);
}
}
static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
HValue* typecheck,
HType type = HType::Tagged()) {
+ Representation representation =
+ type.IsSmi() ? Representation::Smi() : Representation::Tagged();
HLoadNamedField* result = new(zone) HLoadNamedField(
- object, true, JSArray::kLengthOffset, typecheck);
+ object, true, representation, JSArray::kLengthOffset, typecheck);
result->set_type(type);
result->SetGVNFlag(kDependsOnArrayLengths);
result->ClearGVNFlag(kDependsOnInobjectFields);
@@ -5302,6 +5258,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
bool is_in_object() const { return is_in_object_; }
+  Representation field_representation() const { return field_representation_; }
int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -5321,6 +5278,7 @@ class HLoadNamedField: public HTemplateInstruction<2> {
virtual bool IsDeletable() const { return true; }
bool is_in_object_;
+ Representation field_representation_;
int offset_;
};
@@ -5615,29 +5573,41 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
- Handle<String> name,
+ Handle<Name> name,
HValue* val,
bool in_object,
+ Representation field_representation,
int offset)
: name_(name),
is_in_object_(in_object),
+ field_representation_(field_representation),
offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- if (is_in_object_) {
+ if (FLAG_track_double_fields && field_representation.IsDouble()) {
+ SetGVNFlag(kChangesDoubleFields);
+ } else if (is_in_object_) {
SetGVNFlag(kChangesInobjectFields);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
} else {
SetGVNFlag(kChangesBackingStoreFields);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
}
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) {
+ if (FLAG_track_double_fields &&
+ index == 1 && field_representation_.IsDouble()) {
+ return field_representation_;
+ } else if (FLAG_track_fields &&
+ index == 1 && field_representation_.IsSmi()) {
+ return Representation::Integer32();
+ }
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
@@ -5649,7 +5619,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- Handle<String> name() const { return name_; }
+ Handle<Name> name() const { return name_; }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
@@ -5658,7 +5628,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value()) &&
+ ASSERT(!(FLAG_track_double_fields && field_representation_.IsDouble()) ||
+ transition_.is_null());
+ return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
+        // Double stores only write raw bits into an existing HeapNumber; a
+        // transition would require allocating a new storage object, which
+        // the ASSERT above rules out.
+ !(FLAG_track_double_fields && field_representation_.IsDouble()) &&
+ StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
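
Spelled out as a standalone predicate, the decision above reduces to the
following sketch (the helper name and boolean parameters are hypothetical,
not part of the patch):

    static bool FieldStoreNeedsWriteBarrier(Representation r,
                                            bool value_may_be_in_new_space,
                                            bool receiver_may_be_old) {
      // Smi-represented values are immediates, never heap pointers.
      if (FLAG_track_fields && r.IsSmi()) return false;
      // Double stores write raw bits into an existing HeapNumber box.
      if (FLAG_track_double_fields && r.IsDouble()) return false;
      return value_may_be_in_new_space && receiver_may_be_old;
    }
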
@@ -5670,9 +5645,14 @@ class HStoreNamedField: public HTemplateInstruction<2> {
transition_unique_id_ = UniqueValueId(transition_);
}
+ Representation field_representation() const {
+ return field_representation_;
+ }
+
private:
- Handle<String> name_;
+ Handle<Name> name_;
bool is_in_object_;
+ Representation field_representation_;
int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
@@ -6159,12 +6139,14 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
bool fast_elements,
int literal_index,
int depth,
+ bool may_store_doubles,
bool has_function)
: HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
constant_properties_length_(constant_properties->length()),
literals_(literals),
fast_elements_(fast_elements),
+ may_store_doubles_(may_store_doubles),
has_function_(has_function) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
@@ -6179,6 +6161,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
}
Handle<FixedArray> literals() const { return literals_; }
bool fast_elements() const { return fast_elements_; }
+ bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -6193,6 +6176,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
int constant_properties_length_;
Handle<FixedArray> literals_;
bool fast_elements_ : 1;
+ bool may_store_doubles_ : 1;
bool has_function_ : 1;
};
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index a978834c0..5c573feb1 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -641,6 +641,7 @@ HConstant* HGraph::GetConstant##Name() { \
Representation::Tagged(), \
htype, \
false, \
+ true, \
boolean_value); \
constant->InsertAfter(GetConstantUndefined()); \
constant_##name##_.set(constant); \
@@ -1191,10 +1192,13 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length->ClearFlag(HValue::kCanOverflow);
Factory* factory = isolate()->factory();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->length_field_string(),
new_length, true,
+ representation,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@@ -1267,8 +1271,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HValue* elements =
- AddInstruction(new(zone) HLoadElements(object, mapcheck));
+ HValue* elements = AddLoadElements(object, mapcheck);
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = HCheckMaps::New(
@@ -1413,9 +1416,12 @@ void HGraphBuilder::BuildInitializeElements(HValue* elements,
BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
- capacity, true, FixedArray::kLengthOffset);
+ capacity, true, representation,
+ FixedArray::kLengthOffset);
AddInstruction(store_length);
}
@@ -1447,6 +1453,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->properties_field_symbol(),
empty_fixed_array,
true,
+ Representation::Tagged(),
JSArray::kPropertiesOffset));
HInstruction* length_store = AddInstruction(
@@ -1454,6 +1461,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->length_field_string(),
length_field,
true,
+ Representation::Tagged(),
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
@@ -1479,6 +1487,7 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
isolate()->factory()->elements_field_string(),
elements,
true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@@ -1493,7 +1502,9 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
Handle<String> map_field_name = factory->map_field_string();
HInstruction* store_map =
new(zone) HStoreNamedField(object, map_field_name, map,
- true, JSObject::kMapOffset);
+ true, Representation::Tagged(),
+ JSObject::kMapOffset);
+ store_map->ClearGVNFlag(kChangesInobjectFields);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
return store_map;
@@ -1509,6 +1520,18 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
}
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
+ HValue* typecheck) {
+ HLoadNamedField* instr = new(zone()) HLoadNamedField(object, true,
+ Representation::Tagged(), JSObject::kElementsOffset, typecheck);
+ AddInstruction(instr);
+ instr->SetGVNFlag(kDependsOnElementsPointer);
+ instr->ClearGVNFlag(kDependsOnMaps);
+ instr->ClearGVNFlag(kDependsOnInobjectFields);
+ return instr;
+}
+
+
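
Call sites shrink accordingly; the elements load in
BuildUncheckedMonomorphicElementAccess above, for instance, is now simply:

    HValue* elements = AddLoadElements(object, mapcheck);

with the GVN flags (kDependsOnElementsPointer instead of maps and in-object
fields) set once by the helper rather than at each call site.
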
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
HValue* old_capacity) {
Zone* zone = this->zone();
@@ -1574,7 +1597,7 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->elements_field_string(),
- new_elements, true,
+ new_elements, true, Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@@ -1711,13 +1734,13 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ HInstruction* value = AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
if (i != JSArray::kMapOffset) {
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
} else {
BuildStoreMap(object, value);
}
@@ -1732,24 +1755,24 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
if (length > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
- HValue* boilerplate_elements =
- AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* object_elements =
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
factory->elements_field_string(),
- object_elements,
- true, JSObject::kElementsOffset));
+ object_elements, true,
+ Representation::Tagged(),
+ JSObject::kElementsOffset));
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
- true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate_elements, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object_elements,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
// Copy the elements array contents.
@@ -1834,11 +1857,32 @@ HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
isolate()->factory()->payload_string(),
payload,
true,
+ Representation::Tagged(),
AllocationSiteInfo::kPayloadOffset));
return alloc_site;
}
+HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
+ HInstruction* global_object = AddInstruction(new(zone())
+ HGlobalObject(context));
+ HInstruction* native_context = AddInstruction(new(zone())
+ HLoadNamedField(global_object, true, Representation::Tagged(),
+ GlobalObject::kNativeContextOffset));
+ return native_context;
+}
+
+
+HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
+ HInstruction* native_context = BuildGetNativeContext(context);
+ int offset = Context::kHeaderSize +
+ kPointerSize * Context::ARRAY_FUNCTION_INDEX;
+ HInstruction* array_function = AddInstruction(new(zone())
+ HLoadNamedField(native_context, true, Representation::Tagged(), offset));
+ return array_function;
+}
+
+
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
@@ -1855,17 +1899,14 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
- // Get the global context, the native context, the map array
- HInstruction* global_object = AddInstruction(new(zone())
- HGlobalObject(context));
- HInstruction* native_context = AddInstruction(new(zone())
- HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset));
+ HInstruction* native_context = builder()->BuildGetNativeContext(context);
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
- HLoadNamedField(native_context, true, offset));
+ HLoadNamedField(native_context, true, Representation::Tagged(), offset));
offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
- return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset));
+ return AddInstruction(new(zone()) HLoadNamedField(
+ map_array, true, Representation::Tagged(), offset));
}
@@ -3747,7 +3788,39 @@ void HInferRepresentation::Analyze() {
}
}
+ // Set truncation flags for groups of connected phis. This is a conservative
+ // approximation; the flag will be properly re-computed after representations
+ // have been determined.
+ if (phi_count > 0) {
+ BitVector* done = new(zone()) BitVector(phi_count, graph_->zone());
+ for (int i = 0; i < phi_count; ++i) {
+ if (done->Contains(i)) continue;
+
+ // Check if all uses of all connected phis in this group are truncating.
+ bool all_uses_everywhere_truncating = true;
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ all_uses_everywhere_truncating &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ done->Add(index);
+ }
+ if (all_uses_everywhere_truncating) {
+ continue; // Great, nothing to do.
+ }
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
+ }
+ }
+
// Simplify constant phi inputs where possible.
+  // This step relies on the kTruncatingToInt32 flags computed for phis above.
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->SimplifyConstantInputs();
}
@@ -4027,36 +4100,50 @@ void HGraph::InsertRepresentationChanges() {
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
- // TODO(fschneider): Replace this with a worklist-based iteration.
+ ZoneList<HPhi*> worklist(8, zone());
+
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (phi->representation().IsInteger32()) {
phi->SetFlag(HValue::kTruncatingToInt32);
}
}
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((input_representation.IsInteger32() &&
- !use->CheckFlag(HValue::kTruncatingToInt32)) ||
- input_representation.IsDouble()) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
- }
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- break;
+
+ for (int i = 0; i < phi_list()->length(); i++) {
+ HPhi* phi = phi_list()->at(i);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if ((input_representation.IsInteger32() &&
+ !use->CheckFlag(HValue::kTruncatingToInt32)) ||
+ input_representation.IsDouble()) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
+ }
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(phi, zone());
+ break;
+ }
+ }
+ }
+
+ while (!worklist.is_empty()) {
+ HPhi* current = worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
}
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(HPhi::cast(input), zone());
}
}
}
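
The worklist terminates after touching each phi at most once: a phi is
enqueued only in the step that clears its kTruncatingToInt32 flag, and the
CheckFlag guard keeps already-cleared phis out, so the pass is linear where
the removed loop rescanned the whole phi list to a fixed point:

    // Invariant of both loops above:
    //   phi on worklist  =>  phi's kTruncatingToInt32 flag already cleared
    // so no phi is enqueued twice, and work is O(phis + phi operands).
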
@@ -5364,6 +5451,9 @@ void HGraph::DeadCodeElimination() {
while (!worklist.is_empty()) {
HInstruction* instr = worklist.RemoveLast();
+    // This happens when an instruction is used multiple times as an operand,
+    // which in turn can happen through GVN.
+ if (!instr->IsLinked()) continue;
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
@@ -6604,10 +6694,16 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (properties->length() > 0) {
return false;
} else {
- int nof = boilerplate->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors());
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ Representation representation = details.representation();
+ int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate);
+ Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
@@ -6617,6 +6713,8 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
pointer_size)) {
return false;
}
+ } else if (representation.IsDouble()) {
+ *data_size += HeapNumber::kSize;
}
}
}
@@ -6666,6 +6764,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
expr->fast_elements(),
expr->literal_index(),
expr->depth(),
+ expr->may_store_doubles(),
expr->has_function()));
}
@@ -6814,7 +6913,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// of the property values and is the value of the entire expression.
Push(literal);
- HLoadElements* elements = NULL;
+ HInstruction* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
@@ -6826,10 +6925,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
- // Pass in literal as dummy depedency, since the receiver always has
- // elements.
- elements = new(zone()) HLoadElements(literal, literal);
- AddInstruction(elements);
+ elements = AddLoadElements(literal);
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
@@ -6887,14 +6983,29 @@ static bool ComputeLoadStoreField(Handle<Map> type,
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
- Handle<String> name,
LookupResult* lookup) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
if (lookup->IsField()) {
return lookup->GetLocalFieldIndexFromMap(*type);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
- return transition->PropertyIndexFor(*name) - type->inobject_properties();
+ int descriptor = transition->LastAdded();
+ int index = transition->instance_descriptors()->GetFieldIndex(descriptor);
+ return index - type->inobject_properties();
+ }
+}
+
+
+static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
+ LookupResult* lookup) {
+ if (lookup->IsField()) {
+ return lookup->representation();
+ } else {
+ Map* transition = lookup->GetTransitionMapFromMap(*type);
+ int descriptor = transition->LastAdded();
+ PropertyDetails details =
+ transition->instance_descriptors()->GetDetails(descriptor);
+ return details.representation();
}
}
@@ -6949,8 +7060,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
zone()));
}
- int index = ComputeLoadStoreFieldIndex(map, name, lookup);
+ int index = ComputeLoadStoreFieldIndex(map, lookup);
bool is_in_object = index < 0;
+ Representation representation = ComputeLoadStoreRepresentation(map, lookup);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
@@ -6959,9 +7071,33 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
} else {
offset += FixedArray::kHeaderSize;
}
- HStoreNamedField* instr =
- new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->IsTransitionToField(*map)) {
+ bool transition_to_field = lookup->IsTransitionToField(*map);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (transition_to_field) {
+ NoObservableSideEffectsScope no_side_effects(this);
+ HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
+ HeapNumber::kSize, Representation::Integer32()));
+ HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
+ environment()->LookupContext(), heap_number_size,
+ HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+ BuildStoreMap(double_box, isolate()->factory()->heap_number_map());
+ AddInstruction(new(zone()) HStoreNamedField(
+ double_box, name, value, true,
+ Representation::Double(), HeapNumber::kValueOffset));
+ value = double_box;
+ representation = Representation::Tagged();
+ } else {
+ HInstruction* double_box = AddInstruction(new(zone()) HLoadNamedField(
+ object, is_in_object, Representation::Tagged(), offset));
+ double_box->set_type(HType::HeapNumber());
+ return new(zone()) HStoreNamedField(
+ double_box, name, value, true,
+ Representation::Double(), HeapNumber::kValueOffset);
+ }
+ }
+ HStoreNamedField* instr = new(zone()) HStoreNamedField(
+ object, name, value, is_in_object, representation, offset);
+ if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
@@ -7043,22 +7179,31 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- int count = 0;
- int previous_field_offset = 0;
- bool previous_field_is_in_object = false;
- bool is_monomorphic_field = true;
if (HandlePolymorphicArrayLengthLoad(expr, object, types, name))
return;
- Handle<Map> map;
- LookupResult lookup(isolate());
- for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- map = types->at(i);
- if (ComputeLoadStoreField(map, name, &lookup, false)) {
- int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+
+ // Use monomorphic load if property lookup results in the same field index
+ // for all maps. Requires special map check on the set of all handled maps.
+ HInstruction* instr = NULL;
+ if (types->length() > 0 && types->length() <= kMaxLoadPolymorphism) {
+ LookupResult lookup(isolate());
+ int previous_field_offset = 0;
+ bool previous_field_is_in_object = false;
+ Representation representation = Representation::None();
+ int count;
+ for (count = 0; count < types->length(); ++count) {
+ Handle<Map> map = types->at(count);
+ if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+
+ int index = ComputeLoadStoreFieldIndex(map, &lookup);
+ Representation new_representation =
+ ComputeLoadStoreRepresentation(map, &lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
+
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
@@ -7066,31 +7211,33 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
} else {
offset += FixedArray::kHeaderSize;
}
+
if (count == 0) {
previous_field_offset = offset;
previous_field_is_in_object = is_in_object;
- } else if (is_monomorphic_field) {
- is_monomorphic_field = (offset == previous_field_offset) &&
- (is_in_object == previous_field_is_in_object);
+ representation = new_representation;
+ } else if (offset != previous_field_offset ||
+ is_in_object != previous_field_is_in_object ||
+ (FLAG_track_fields &&
+ !representation.IsCompatibleForLoad(new_representation))) {
+ break;
}
- ++count;
+
+ representation = representation.generalize(new_representation);
+ }
+
+ if (count == types->length()) {
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ instr = DoBuildLoadNamedField(
+ object, previous_field_is_in_object,
+ representation, previous_field_offset);
}
}
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* instr;
- if (count == types->length() && is_monomorphic_field) {
- AddInstruction(HCheckMaps::New(object, types, zone()));
- instr = BuildLoadNamedField(object, map, &lookup);
- } else {
+ if (instr == NULL) {
HValue* context = environment()->LookupContext();
- instr = new(zone()) HLoadNamedFieldPolymorphic(context,
- object,
- types,
- name,
- zone());
+ instr = new(zone()) HLoadNamedFieldPolymorphic(
+ context, object, types, name, zone());
}
instr->set_position(expr->position());
@@ -7239,14 +7386,15 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
+ HValue* value = environment()->ExpressionStackAt(0);
+ HValue* key = environment()->ExpressionStackAt(1);
+ HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
expr->position(),
true, // is_store
&has_side_effects);
+ Drop(3);
Push(value);
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
@@ -7648,16 +7796,37 @@ HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
Handle<Map> map,
LookupResult* lookup) {
int index = lookup->GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, offset);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, offset);
+  // Negative property indices are in-object properties, indexed from the end
+  // of the fixed part of the object. Non-negative property indices are in the
+  // properties array.
+  bool inobject = index < 0;
+ Representation representation = lookup->representation();
+ int offset = inobject
+ ? index * kPointerSize + map->instance_size()
+ : index * kPointerSize + FixedArray::kHeaderSize;
+ return DoBuildLoadNamedField(object, inobject, representation, offset);
+}
+
+
+HLoadNamedField* HGraphBuilder::DoBuildLoadNamedField(
+ HValue* object,
+ bool inobject,
+ Representation representation,
+ int offset) {
+ bool load_double = false;
+ if (representation.IsDouble()) {
+ representation = Representation::Tagged();
+ load_double = FLAG_track_double_fields;
}
+ HLoadNamedField* field =
+ new(zone()) HLoadNamedField(object, inobject, representation, offset);
+ if (load_double) {
+ AddInstruction(field);
+ field->set_type(HType::HeapNumber());
+ return new(zone()) HLoadNamedField(
+ field, true, Representation::Double(), HeapNumber::kValueOffset);
+ }
+ return field;
}
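
With FLAG_track_double_fields enabled, a double field therefore costs two
dependent loads rather than one; schematically:

    // box   = HLoadNamedField(object, inobject, Tagged, offset)  // HeapNumber
    // value = HLoadNamedField(box, true, Double, HeapNumber::kValueOffset)

Only the inner load carries the Double representation; the outer one still
sees a tagged pointer.
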
@@ -7927,8 +8096,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, checkspec));
+ HInstruction* elements = AddLoadElements(object, checkspec);
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@@ -9555,16 +9723,13 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
- AddInstruction(new(zone()) HCheckFunction(constructor,
- Handle<JSFunction>(isolate()->global_context()->array_function())));
- Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
- ASSERT(feedback->IsSmi());
-
// TODO(mvstanton): It would be better to use the already created global
// property cell that is shared by full code gen. That way, any transition
// information that happened after crankshaft won't be lost. The right
// way to do that is to begin passing the cell to the type feedback oracle
// instead of just the value in the cell. Do this in a follow-up checkin.
+ Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
+ ASSERT(feedback->IsSmi());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(feedback);
@@ -9977,7 +10142,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
- input = Pop();
+ input = environment()->ExpressionStackAt(0);
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
@@ -9985,10 +10150,10 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
true, // is_store
&has_side_effects);
- // Drop the key from the bailout environment. Overwrite the receiver
- // with the result of the operation, and the placeholder with the
- // original value if necessary.
- Drop(1);
+ // Drop the key and the original value from the bailout environment.
+ // Overwrite the receiver with the result of the operation, and the
+ // placeholder with the original value if necessary.
+ Drop(2);
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
@@ -10660,7 +10825,6 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
int elements_offset = *offset + object_size;
- int inobject_properties = boilerplate_object->map()->inobject_properties();
if (create_allocation_site_info) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
@@ -10674,28 +10838,49 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
// Copy in-object properties.
HValue* object_properties =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
- for (int i = 0; i < inobject_properties; i++) {
+
+ Handle<DescriptorArray> descriptors(
+ boilerplate_object->map()->instance_descriptors());
+ int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
+ Handle<Name> name(descriptors->GetKey(i));
Handle<Object> value =
- Handle<Object>(boilerplate_object->InObjectPropertyAt(i),
+ Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(i),
+ Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
AddInstruction(new(zone) HStoreNamedField(
- object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ object_properties, name, value_instruction, true,
+ Representation::Tagged(), property_offset));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
+ Representation representation = details.representation();
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
+ if (representation.IsDouble()) {
+ HInstruction* double_box =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ BuildStoreMap(double_box, factory->heap_number_map());
+ AddInstruction(new(zone) HStoreNamedField(
+ double_box, name, value_instruction, true,
+ Representation::Double(), HeapNumber::kValueOffset));
+ value_instruction = double_box;
+ *offset += HeapNumber::kSize;
+ }
AddInstruction(new(zone) HStoreNamedField(
- object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ object_properties, name, value_instruction, true,
+ Representation::Tagged(), property_offset));
}
}
@@ -10790,7 +10975,7 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
object_header,
factory->elements_field_string(),
elements,
- true, JSObject::kElementsOffset));
+ true, Representation::Tagged(), JSObject::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<Object> properties_field =
@@ -10800,8 +10985,9 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
properties_field, Representation::None()));
AddInstruction(new(zone) HStoreNamedField(object_header,
factory->empty_string(),
- properties,
- true, JSObject::kPropertiesOffset));
+ properties, true,
+ Representation::Tagged(),
+ JSObject::kPropertiesOffset));
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -10810,11 +10996,15 @@ HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
+ ASSERT(boilerplate_array->length()->IsSmi());
+ Representation representation =
+ IsFastElementsKind(boilerplate_array->GetElementsKind())
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->length_field_string(),
length,
- true, JSArray::kLengthOffset));
+ true, representation, JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@@ -11207,6 +11397,7 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
name,
value,
true, // in-object store.
+ Representation::Tagged(),
JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index ab721bd3a..a95424a1c 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -982,6 +982,11 @@ class HGraphBuilder {
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
+ HLoadNamedField* DoBuildLoadNamedField(HValue* object,
+ bool inobject,
+ Representation representation,
+ int offset);
+
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@@ -1025,6 +1030,8 @@ class HGraphBuilder {
HInstruction* BuildStoreMap(HValue* object, HValue* map);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
+  HLoadNamedField* AddLoadElements(HValue* object, HValue* typecheck = NULL);
+
class IfBuilder {
public:
explicit IfBuilder(HGraphBuilder* builder,
@@ -1317,6 +1324,9 @@ class HGraphBuilder {
int previous_object_size,
HValue* payload);
+ HInstruction* BuildGetNativeContext(HValue* context);
+ HInstruction* BuildGetArrayFunction(HValue* context);
+
private:
HGraphBuilder();
CompilationInfo* info_;
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 96d241186..289723497 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@@ -79,6 +80,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -107,9 +130,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
+ // edi -- function
// ebx -- type info cell with elements kind
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { edi, ebx };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
@@ -3298,12 +3322,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
}
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -4758,6 +4776,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(ecx, &miss);
__ cmp(ecx, Immediate(terminal_kind_sentinel));
__ j(above, &miss);
// Load the global or builtins object from the current context
@@ -5821,8 +5840,33 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ RecordWriteField(ecx,
+ ConsString::kFirstOffset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ RecordWriteField(ecx,
+ ConsString::kSecondOffset,
+ edx,
+ ebx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+
+ __ bind(&after_writing);
+
__ mov(eax, ecx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -7354,8 +7398,10 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub
+ // FastNewClosureStub and StringAddStub::Generate
{ REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7876,15 +7922,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
- __ mov(edx, FieldOperand(ebx, kPointerSize));
-
- // There is no info if the call site went megamorphic either
-
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ cmp(edx, Immediate(TypeFeedbackCells::MegamorphicSentinel(
- masm->isolate())));
- __ j(equal, &no_info);
+ __ mov(edx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(edx, &no_info);
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
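
The smi check subsumes the removed megamorphic-sentinel comparison: a smi
payload is taken to be the recorded elements kind, while any non-smi cell
value (the megamorphic sentinel or, presumably, any other object stored
there) falls through to the no_info path:

    // cell value is a smi     -> recorded ElementsKind, jmp switch_ready
    // cell value is a non-smi -> no transition info, take the no_info path
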
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index d153e18ee..a4c6bcc67 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -240,6 +240,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC call call (from ic-ia32.cc)
// ----------- S t a t e -------------
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index f71a76dd0..5a780197c 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1529,7 +1529,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
@@ -1900,11 +1901,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ j(not_equal, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1916,18 +1918,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2033,6 +2024,54 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ mov(ebx, map);
+ __ pop(ecx);
+ __ mov(edx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
+ ecx, edx, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
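
The five stores above build the iterator result object the removed TODOs
described as { value: VALUE, done: DONE }; per the ASSERT_EQ, its map covers
exactly five words:

    // +kMapOffset                 : generator_result_map
    // +kPropertiesOffset          : empty_fixed_array    (root set, no barrier)
    // +kElementsOffset            : empty_fixed_array    (root set, no barrier)
    // +kResultValuePropertyOffset : value                (needs RecordWriteField)
    // +kResultDonePropertyOffset  : true/false           (root set, no barrier)
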
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 964db0eb1..e05031b8e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1530,6 +1530,26 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d93c27ad2..1d9e9421b 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -113,6 +113,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1230,7 +1234,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(dividend, divisor - 1);
__ bind(&done);
} else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Label done, remainder_eq_dividend, slow, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
@@ -1266,23 +1270,10 @@ void LCodeGen::DoModI(LModI* instr) {
__ mov(scratch, right_reg);
__ sub(Operand(scratch), Immediate(1));
__ test(scratch, Operand(right_reg));
- __ j(not_zero, &do_subtraction, Label::kNear);
+ __ j(not_zero, &slow, Label::kNear);
__ and_(left_reg, Operand(scratch));
__ jmp(&remainder_eq_dividend, Label::kNear);
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ mov(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ sub(left_reg, Operand(right_reg));
- // Check if the dividend is less than the divisor.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ mov(left_reg, scratch);
-
// Slow case, using idiv instruction.
__ bind(&slow);
@@ -1915,16 +1906,24 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
} else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ add(ToOperand(left), ToInteger32Immediate(right));
+ } else {
+ __ add(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
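
When the result register differs from the left operand, lea folds the move
and the add into a single instruction without touching the flags; the
two-address fallback remains for the overflow-checking case, since lea does
not set the overflow flag the deopt test needs (which is presumably what
LAddI::UseLea checks for). A sketch of the two emitted forms, with
illustrative registers:

    // result != left:  lea eax, [ebx + ecx]   ; one instruction, flags untouched
    // fallback:        mov eax, ebx
    //                  add eax, ecx           ; sets overflow for DeoptimizeIf
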
@@ -2956,13 +2955,27 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movdbl(result, FieldOperand(object, offset));
+ } else {
+ PushX87DoubleOperand(FieldOperand(object, offset));
+ CurrentInstructionReturnsX87Result();
+ }
+ return;
+ }
+
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+ __ mov(result, FieldOperand(object, offset));
} else {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+ __ mov(result, FieldOperand(result, offset));
}
}
@@ -3146,41 +3159,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done, Label::kNear);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Map::kElementsKindMask);
- __ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -4213,8 +4191,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4241,16 +4218,51 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ SmiTag(value);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movdbl(FieldOperand(object, offset), value);
+ } else {
+ __ fstp_d(FieldOperand(object, offset));
+ }
+ return;
+ }
+
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
+ __ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@@ -4286,6 +4298,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ mov(FieldOperand(write_register, offset), handle_value);
}
} else {
@@ -5459,6 +5472,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -6025,18 +6040,24 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
- __ SmiTag(size);
- PushSafepointRegistersScope scope(this);
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- if (!size.is(result)) {
- __ StoreToSafepointRegisterSlot(result, size);
+ __ mov(result, Immediate(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(ToRegister(instr->size()));
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ push(Immediate(Smi::FromInt(size)));
}
- __ push(size);
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
@@ -6125,7 +6146,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
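
An aside on the smi-tagging deopt in DoStoreNamedField above: on ia32 a smi is the 32-bit integer shifted left by one with a zero tag bit, so tagging a value outside the 31-bit range overflows, and the generated code deoptimizes on that overflow. A minimal portable sketch of the same check (stand-in code, not the V8 macro assembler):

    #include <cstdint>
    #include <limits>

    // Mirrors the SmiTag + DeoptimizeIf(overflow, ...) pair emitted above:
    // returns false exactly when tagging would overflow 32 bits.
    bool FitsInSmi(int32_t value, int32_t* tagged_out) {
      int64_t tagged = static_cast<int64_t>(value) << 1;  // tag bit is 0
      if (tagged < std::numeric_limits<int32_t>::min() ||
          tagged > std::numeric_limits<int32_t>::max()) {
        return false;  // would overflow: the optimized code deopts here
      }
      *tagged_out = static_cast<int32_t>(tagged);
      return true;
    }
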
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 5b44d87b0..1fea25bdb 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -59,6 +59,7 @@ class LCodeGen BASE_EMBEDDED {
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -415,6 +416,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index c023fd15b..6c9098e75 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -601,6 +601,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -731,7 +736,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -829,8 +834,8 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -1387,8 +1392,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1555,8 +1560,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
@@ -1599,13 +1604,24 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
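
The UseLea heuristic trades the two-operand add (which overwrites its left input but sets flags) for a three-operand lea (which preserves both inputs but sets no flags, so it cannot feed an overflow check). A stand-in sketch of the decision, with illustrative types rather than V8's HAdd:

    struct AddInfo {
      bool can_overflow;    // lea sets no flags, so it cannot detect overflow
      int left_use_count;   // inputs worth preserving are used again later
    };

    bool ShouldUseLea(const AddInfo& add) {
      return !add.can_overflow && add.left_use_count > 1;
    }
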
@@ -1624,8 +1640,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2166,7 +2182,6 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2207,12 +2222,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2287,19 +2296,6 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
}
-// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
-// use of a constant instead of a register.
-static bool StoreConstantValueAllowed(HValue* value) {
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- return constant_value->HasSmiValue()
- || constant_value->HasDoubleValue()
- || constant_value->ImmortalImmovable();
- }
- return false;
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@@ -2327,17 +2323,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
- if (StoreConstantValueAllowed(instr->value())) {
- val = UseRegisterOrConstantAtStart(instr->value());
- } else {
- val = UseRegisterAtStart(instr->value());
- }
-
- if (StoreConstantValueAllowed(instr->key())) {
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- key = UseRegisterAtStart(instr->key());
- }
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
}
return new(zone()) LStoreKeyed(obj, key, val);
}
@@ -2438,11 +2425,24 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
- } else if (StoreConstantValueAllowed(instr->value())) {
+ } else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseX87TopOfStack(instr->value());
+ }
} else {
val = UseRegister(instr->value());
}
@@ -2450,12 +2450,17 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2516,8 +2521,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
- // TODO(mvstanton): why can't size be a constant if possible?
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
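
With UseConstant, a statically known allocation size no longer occupies a temp register: the deferred path in DoDeferredAllocate pushes it as an immediate smi for the runtime call. A rough stand-in for that branch (types here are illustrative, not V8's):

    #include <cstdint>
    #include <vector>

    // Pushes the allocation size for the runtime call, tagged as a smi.
    // Constant sizes become an immediate; only dynamic sizes need a register.
    void PushAllocationSize(std::vector<int32_t>* stack,
                            bool is_constant, int32_t constant_size,
                            int32_t register_value) {
      int32_t raw = is_constant ? constant_size : register_value;
      stack->push_back(static_cast<int32_t>(static_cast<int64_t>(raw) << 1));
    }
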
@@ -2587,7 +2593,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 49462cb88..820241891 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -117,7 +117,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1370,6 +1369,11 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1496,6 +1500,11 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ virtual bool ClobbersDoubleRegisters() const {
+ return !CpuFeatures::IsSupported(SSE2) &&
+ !hydrogen()->representation().IsDouble();
+ }
+
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
@@ -1550,18 +1559,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@@ -2206,6 +2203,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2908,6 +2908,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand that is a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 733dbdb03..175b1ca1d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1603,10 +1603,32 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ j(zero, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+ jmp(&install_map);
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(isolate()->factory()->cons_ascii_string_map()));
@@ -2889,6 +2911,18 @@ void MacroAssembler::CheckPageFlagForMap(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, map);
+ mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
+ j(not_zero, if_deprecated);
+ }
+}
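
CheckMapDeprecated compiles down to a load of the map's bit_field3 word and a single mask test. A minimal sketch, assuming an illustrative bit position (the real mask comes from Map::Deprecated::kMask):

    #include <cstdint>

    constexpr uint32_t kDeprecatedMask = 1u << 23;  // assumed bit position

    bool IsDeprecated(uint32_t bit_field3) {
      return (bit_field3 & kDeprecatedMask) != 0;  // j(not_zero, if_deprecated)
    }
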
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 2b7641c9c..519652ac3 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -91,6 +91,10 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index a44beec29..9623b9a52 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -369,11 +369,13 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -763,8 +765,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
+ Register unused,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
@@ -774,6 +778,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -790,7 +803,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// We need an extra register, push
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -809,6 +822,46 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
+
+ __ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -820,7 +873,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
- __ push(eax);
+ __ push(value_reg);
__ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -834,12 +887,11 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ mov(scratch1, Immediate(transition));
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -856,31 +908,51 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
@@ -920,35 +992,91 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
+ __ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
+ }
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
@@ -1197,10 +1325,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1455,7 +1593,9 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
name, &miss);
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
+ GenerateFastPropertyLoad(
+ masm(), edi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(edi, &miss);
@@ -2984,17 +3124,23 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ __ j(equal, handlers->at(current));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
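
The polymorphic IC built above is a linear chain of compare-and-jump pairs, and deprecated maps are simply left out of the chain so their instances fall through to the miss path and get migrated. A behavioral sketch with stand-in types:

    #include <vector>

    struct ReceiverMap { int map_id; bool deprecated; };

    // Returns how many maps get a compare+jump; the result must be non-zero,
    // and the IC is POLYMORPHIC only when more than one map is handled.
    int CountHandledMaps(const std::vector<ReceiverMap>& maps) {
      int handled = 0;
      for (const ReceiverMap& m : maps) {
        if (!m.deprecated) ++handled;  // deprecated maps miss and migrate
      }
      return handled;
    }
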
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 40676abc3..31845f214 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -182,6 +182,15 @@ Address IC::OriginalCodeAddress() const {
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
+ if (target->is_keyed_load_stub() ||
+ target->is_keyed_call_stub() ||
+ target->is_keyed_store_stub()) {
+ // Determine whether the failure is due to a name mismatch.
+ if (!name->IsName()) return false;
+ Name* stub_name = target->FindFirstName();
+ if (Name::cast(name) != stub_name) return false;
+ }
+
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
@@ -208,10 +217,30 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
map->RemoveFromCodeCache(String::cast(name), target, index);
+ // For loads, handlers are stored in addition to the ICs on the map. Remove
+ // those, too.
+ if (target->is_load_stub() || target->is_keyed_load_stub()) {
+ Code* handler = target->FindFirstCode();
+ index = map->IndexInCodeCache(name, handler);
+ if (index >= 0) {
+ map->RemoveFromCodeCache(String::cast(name), handler, index);
+ }
+ }
return true;
}
- return false;
+ // If the IC is shared between multiple receivers (slow dictionary mode), then
+ // the map cannot be deprecated and the stub invalidated.
+ if (cache_holder != OWN_MAP) return false;
+
+ // The stub is not in the cache. We've ruled out all other kinds of failure
+ // except for prototype chain changes, a deprecated map, or a map that's
+ // different from the one that the stub expects. If the map hasn't changed,
+ // assume it's a prototype failure. Treat deprecated maps in the same way as
+ // prototype failures (stay monomorphic if possible).
+ Map* old_map = target->FindFirstMap();
+ if (old_map == NULL) return false;
+ return old_map == map || old_map->is_deprecated();
}
@@ -221,22 +250,13 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
if (state != MONOMORPHIC || !name->IsString()) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
- }
-
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
// Call stubs handle this later to allow extra IC state
// transitions.
- if (kind != Code::CALL_IC &&
+ if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@@ -506,6 +526,13 @@ MaybeObject* CallICBase::LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -708,8 +735,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
object, name);
- } else if (kind_ == Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target(),
+ } else if (TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
state = MONOMORPHIC_PROTOTYPE_FAILURE;
@@ -732,15 +758,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
case UNINITIALIZED:
case MONOMORPHIC_PROTOTYPE_FAILURE:
case PREMONOMORPHIC:
- set_target(*code);
- break;
case MONOMORPHIC:
- if (code->ic_state() != MONOMORPHIC) {
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, target());
- }
- }
set_target(*code);
break;
case MEGAMORPHIC: {
@@ -777,6 +795,13 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
Handle<String>::cast(key));
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
@@ -891,6 +916,13 @@ MaybeObject* LoadIC::Load(State state,
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// Named lookup in the object.
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
@@ -955,11 +987,30 @@ bool IC::UpdatePolymorphicIC(State state,
MapHandleList receiver_maps;
CodeHandleList handlers;
+ int number_of_valid_maps;
+ int handler_to_overwrite = -1;
+ Handle<Map> new_receiver_map(receiver->map());
{
AssertNoAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
- if (number_of_maps >= 4) return false;
+ number_of_valid_maps = number_of_maps;
+
+ for (int i = 0; i < number_of_maps; i++) {
+ Handle<Map> map = receiver_maps.at(i);
+ // Filter out deprecated maps to ensure their instances get migrated.
+ if (map->is_deprecated()) {
+ number_of_valid_maps--;
+ // If the receiver map is already in the polymorphic IC, this indicates
+ // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ } else if (map.is_identical_to(new_receiver_map)) {
+ number_of_valid_maps--;
+ handler_to_overwrite = i;
+ }
+ }
+
+ if (number_of_valid_maps >= 4) return false;
// Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
// In that case, allow the IC to go back monomorphic.
@@ -969,14 +1020,16 @@ bool IC::UpdatePolymorphicIC(State state,
target()->FindAllCode(&handlers, receiver_maps.length());
}
- if (!AddOneReceiverMapIfMissing(&receiver_maps,
- Handle<Map>(receiver->map()))) {
- return false;
+ number_of_valid_maps++;
+ if (handler_to_overwrite >= 0) {
+ handlers.Set(handler_to_overwrite, code);
+ } else {
+ receiver_maps.Add(new_receiver_map);
+ handlers.Add(code);
}
- handlers.Add(code);
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, name);
+ &receiver_maps, &handlers, number_of_valid_maps, name);
set_target(*ic);
return true;
}
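
UpdatePolymorphicIC now distinguishes a genuinely new receiver map from a prototype failure on a known map: the former appends a (map, handler) pair, while the latter overwrites the stale handler in place via the new List::Set. A std::vector stand-in for that bookkeeping:

    #include <cstddef>
    #include <vector>

    void InstallHandler(std::vector<int>* maps, std::vector<int>* handlers,
                        int new_map, int new_handler) {
      for (size_t i = 0; i < maps->size(); ++i) {
        if ((*maps)[i] == new_map) {
          (*handlers)[i] = new_handler;  // prototype failure: overwrite
          return;
        }
      }
      maps->push_back(new_map);          // new receiver map: append
      handlers->push_back(new_handler);
    }
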
@@ -1063,16 +1116,9 @@ void IC::PatchCache(State state,
if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
break;
}
- }
- if (target()->type() != Code::NORMAL) {
- if (target()->is_load_stub()) {
+
+ if (target()->type() != Code::NORMAL) {
CopyICToMegamorphicCache(name);
- } else {
- Code* handler = target();
- Map* map = handler->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, handler);
- }
}
}
@@ -1175,7 +1221,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
-void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) {
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
isolate()->stub_cache()->Set(name, map, code);
@@ -1195,7 +1241,8 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
+ name, receiver, holder,
+ lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction());
return isolate()->stub_cache()->ComputeLoadConstant(
@@ -1237,7 +1284,7 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
PropertyIndex lengthIndex =
PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lengthIndex);
+ name, receiver, holder, lengthIndex, Representation::Tagged());
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1366,6 +1413,10 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
@@ -1400,7 +1451,8 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
+ name, receiver, holder,
+ lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeKeyedLoadConstant(
@@ -1432,7 +1484,9 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
- LookupResult* lookup) {
+ Handle<Object> value,
+ LookupResult* lookup,
+ IC::State* state) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
@@ -1444,9 +1498,10 @@ static bool LookupForWrite(Handle<JSObject> receiver,
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
+ lookup->CanHoldValue(value) &&
lookup->IsCacheable();
}
- return true;
+ return lookup->CanHoldValue(value);
}
if (lookup->IsPropertyCallbacks()) return true;
@@ -1464,8 +1519,25 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// chain check. This avoids a double lookup, but requires us to pass in the
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
- return lookup->IsTransition() &&
- !lookup->GetTransitionDetails(receiver->map()).IsReadOnly();
+ if (!lookup->IsTransition()) return false;
+ PropertyDetails target_details =
+ lookup->GetTransitionDetails(receiver->map());
+ if (target_details.IsReadOnly()) return false;
+
+ // If the value that's being stored does not fit in the field that the
+ // instance would transition to, create a new transition that fits the value.
+ // This has to be done before generating the IC, since that IC will embed the
+ // transition target.
+ // Ensure the instance and its map were migrated before trying to update the
+ // transition target.
+ ASSERT(!receiver->map()->is_deprecated());
+ if (!value->FitsRepresentation(target_details.representation())) {
+ Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
+ Map::GeneralizeRepresentation(
+ target, target->LastAdded(), value->OptimalRepresentation());
+ *state = MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
+ return true;
}
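
A rough model of the FitsRepresentation/GeneralizeRepresentation interplay in LookupForWrite: field representations form a small lattice, and a store that does not fit widens the transition target before the IC embeds it. The lattice below is simplified to a single chain for illustration; real V8 has more states:

    enum class Rep { kNone, kSmi, kDouble, kTagged };

    bool Fits(Rep value, Rep field) {
      return static_cast<int>(value) <= static_cast<int>(field);
    }

    Rep Generalize(Rep field, Rep value) {
      return Fits(value, field) ? field : value;  // widen the target field
    }
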
@@ -1499,6 +1571,10 @@ MaybeObject* StoreIC::Store(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
@@ -1545,7 +1621,7 @@ MaybeObject* StoreIC::Store(State state,
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
+ if (LookupForWrite(receiver, name, value, &lookup, &state)) {
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
@@ -1954,6 +2030,9 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
bool key_is_smi_like = key->IsSmi() ||
(FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
if (receiver->elements()->map() ==
@@ -2183,11 +2262,24 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
int new_unused = transition->unused_property_fields();
int new_size = old_storage->length() + new_unused + 1;
Object* result;
- { MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = old_storage->CopySize(new_size);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+
FixedArray* new_storage = FixedArray::cast(result);
- new_storage->set(old_storage->length(), value);
+
+ Object* to_store = value;
+
+ if (FLAG_track_double_fields) {
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ MaybeObject* maybe_storage =
+ isolate->heap()->AllocateHeapNumber(value->Number());
+ if (!maybe_storage->To(&to_store)) return maybe_storage;
+ }
+ }
+
+ new_storage->set(old_storage->length(), to_store);
// Set the new property value and do the map transition.
object->set_properties(new_storage);
@@ -2229,6 +2321,24 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
}
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
+ NoHandleAllocation na(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 4bf259a2f..739f34ce5 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -45,6 +45,7 @@ namespace internal {
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
+ ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
ICU(KeyedStoreIC_MissForceGeneric) \
@@ -184,7 +185,7 @@ class IC {
Handle<JSObject> receiver,
Handle<String> name,
Handle<Code> code);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code);
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
virtual Handle<Code> megamorphic_stub() {
UNREACHABLE();
return Handle<Code>::null();
@@ -471,7 +472,7 @@ class KeyedLoadIC: public LoadIC {
virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
private:
// Stub accessors.
@@ -504,6 +505,7 @@ class StoreIC: public IC {
}
// Code generators for stub routines. Only called once at startup.
+ static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
@@ -620,7 +622,7 @@ class KeyedStoreIC: public StoreIC {
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
virtual Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 658a34c9c..bacbb93ad 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -490,10 +490,16 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
-
+ // Only start incremental marking in a safe state: 1) when the --expose-gc
+ // flag is off, 2) when incremental marking is turned on, 3) when we are
+ // currently not in a GC, and 4) when we are currently not serializing
+ // or deserializing the heap.
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
+ FLAG_incremental_marking_steps &&
+ heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
+ heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -561,17 +567,21 @@ void IncrementalMarking::UncommitMarkingDeque() {
}
-void IncrementalMarking::Start() {
+void IncrementalMarking::Start(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
ASSERT(FLAG_incremental_marking);
+ ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
+ ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
+ ASSERT(!Serializer::enabled());
+ ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
if (heap_->IsSweepingComplete()) {
- StartMarking(ALLOW_COMPACTION);
+ StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -860,6 +870,17 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
}
+void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+ if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
+ // TODO(hpayer): Let's play it safe for now, but compaction should in
+ // principle be possible.
+ Start(PREVENT_COMPACTION);
+ } else {
+ Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+ }
+}
+
+
void IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
@@ -965,7 +986,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
- marking_speed_ += kMarkingSpeedAccellerationInterval;
+ marking_speed_ += kMarkingSpeedAccelleration;
marking_speed_ = static_cast<int>(
Min(kMaxMarkingSpeed,
static_cast<intptr_t>(marking_speed_ * 1.3)));
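
The pacing update at the end of this hunk grows the marking speed additively and then caps it both at 1.3x the previous value and at an absolute maximum. As a plain function, with the constants copied from kMarkingSpeedAccelleration and kMaxMarkingSpeed in the header below:

    #include <algorithm>
    #include <cstdint>

    int NextMarkingSpeed(int speed) {
      const intptr_t kAccelleration = 2;   // kMarkingSpeedAccelleration
      const intptr_t kMax = 1000;          // kMaxMarkingSpeed
      intptr_t next = speed + kAccelleration;
      next = std::min(kMax, static_cast<intptr_t>(next * 1.3));
      return static_cast<int>(next);
    }
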
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index fc5a978cc..47d5a518b 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -75,7 +75,9 @@ class IncrementalMarking {
bool WorthActivating();
- void Start();
+ enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+ void Start(CompactionFlag flag = ALLOW_COMPACTION);
void Stop();
@@ -110,10 +112,7 @@ class IncrementalMarking {
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
- void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialMarkingSpeed,
- GC_VIA_STACK_GUARD);
- }
+ void OldSpaceStep(intptr_t allocated);
void Step(intptr_t allocated, CompletionAction action);
@@ -226,8 +225,6 @@ class IncrementalMarking {
void ResetStepCounters();
- enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
void StartMarking(CompactionFlag flag);
void ActivateIncrementalWriteBarrier(PagedSpace* space);
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 74850cae2..ddc3b736e 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -381,33 +381,23 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// First check whether there is a single expected transition. If so, try
// to parse it first.
bool follow_expected = false;
+ Handle<Map> target;
if (seq_ascii) {
key = JSObject::ExpectedTransitionKey(map);
follow_expected = !key.is_null() && ParseJsonString(key);
}
// If the expected transition hits, follow it.
if (follow_expected) {
- map = JSObject::ExpectedTransitionTarget(map);
+ target = JSObject::ExpectedTransitionTarget(map);
} else {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
key = ParseJsonInternalizedString();
if (key.is_null()) return ReportUnexpectedCharacter();
- Handle<Map> target = JSObject::FindTransitionToField(map, key);
+ target = JSObject::FindTransitionToField(map, key);
// If a transition was found, follow it and continue.
- if (!target.is_null()) {
- map = target;
- } else {
- // If no transition was found, commit the intermediate state to the
- // object and stop transitioning.
- JSObject::TransitionToMap(json_object, map);
- int length = properties.length();
- for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
- }
- transitioning = false;
- }
+ transitioning = !target.is_null();
}
if (c0_ != ':') return ReportUnexpectedCharacter();
@@ -415,8 +405,36 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- properties.Add(value, zone());
- if (transitioning) continue;
+ if (transitioning) {
+ int descriptor = map->NumberOfOwnDescriptors();
+ PropertyDetails details =
+ target->instance_descriptors()->GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+
+ if (value->FitsRepresentation(expected_representation)) {
+ // If the target representation is double but the value is a smi,
+ // box it in a freshly allocated heap number.
+ if (FLAG_track_double_fields &&
+ value->IsSmi() &&
+ expected_representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
+ }
+ properties.Add(value, zone());
+ map = target;
+ continue;
+ } else {
+ transitioning = false;
+ }
+ }
+
+ // Commit the intermediate state to the object and stop transitioning.
+ JSObject::AllocateStorageForMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value = properties[i];
+ json_object->FastPropertyAtPut(i, *value);
+ }
} else {
key = ParseJsonInternalizedString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
@@ -435,10 +453,21 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// If we transitioned until the very end, transition the map now.
if (transitioning) {
- JSObject::TransitionToMap(json_object, map);
+ JSObject::AllocateStorageForMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
+ Handle<Object> value = properties[i];
+ // If the target representation is double but the value is a smi,
+ // box it in a freshly allocated heap number.
+ if (FLAG_track_double_fields && value->IsSmi()) {
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
+ }
+ }
+ json_object->FastPropertyAtPut(i, *value);
}
}
}
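
Both boxing sites in this parser follow the same rule: a parsed smi headed for a double field must be wrapped in a heap number before FastPropertyAtPut, because the field slot is expected to hold a box. A stand-in model of that coercion (illustrative types, not V8's handles):

    #include <cstdint>
    #include <memory>
    #include <variant>

    struct HeapNumber { double value; };
    using Value = std::variant<int32_t, std::shared_ptr<HeapNumber>>;

    Value CoerceForField(Value v, bool field_is_double) {
      if (field_is_double) {
        if (const int32_t* smi = std::get_if<int32_t>(&v)) {
          return std::make_shared<HeapNumber>(
              HeapNumber{static_cast<double>(*smi)});
        }
      }
      return v;  // already a heap number, or the field is not double
    }
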
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 47a01297a..b67a9f6b6 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -644,7 +644,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<Object> property;
if (details.type() == FIELD && *map == object->map()) {
property = Handle<Object>(
- object->FastPropertyAt(
+ object->RawFastPropertyAt(
map->instance_descriptors()->GetFieldIndex(i)),
isolate_);
} else {
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index d815a7e22..143c830ee 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -104,6 +104,13 @@ Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
template<typename T, class P>
+void List<T, P>::Set(int index, const T& elm) {
+ ASSERT(index >= 0 && index < length_);
+ data_[index] = elm;
+}
+
+
+template<typename T, class P>
void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
ASSERT(index >= 0 && index <= length_);
Add(elm, alloc);
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 43d982f68..0e4e35bb4 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -115,6 +115,9 @@ class List {
void InsertAt(int index, const T& element,
AllocationPolicy allocator = AllocationPolicy());
+ // Overwrites the element at the specified index.
+ void Set(int index, const T& element);
+
// Adds 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.
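
A behavioral model of the new List::Set against a std::vector, mainly to pin down the precondition: Set overwrites an existing slot and never grows the list, unlike Add or InsertAt:

    #include <cassert>
    #include <vector>

    template <typename T>
    void Set(std::vector<T>* list, int index, const T& value) {
      assert(index >= 0 && index < static_cast<int>(list->size()));
      (*list)[index] = value;  // in-place overwrite, length unchanged
    }
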
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 7bddef7f9..74132b3b7 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -56,9 +56,11 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
}
-UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
+UsePosition::UsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint)
: operand_(operand),
- hint_(NULL),
+ hint_(hint),
pos_(pos),
next_(NULL),
requires_reg_(false),
@@ -138,6 +140,7 @@ LiveRange::LiveRange(int id, Zone* zone)
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
+ current_hint_operand_(NULL),
spill_operand_(new(zone) LOperand()),
spill_start_index_(kMaxInt) { }
@@ -227,13 +230,6 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
}
-UsePosition* LiveRange::FirstPosWithHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- return pos;
-}
-
-
LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
@@ -375,7 +371,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start.Value() == other_start.Value()) {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos();
if (pos == NULL) return false;
UsePosition* other_pos = other->first_pos();
if (other_pos == NULL) return true;
@@ -449,16 +445,19 @@ void LiveRange::AddUseInterval(LifetimePosition start,
}
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone) {
+void LiveRange::AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone) {
LAllocator::TraceAlloc("Add to live range %d use position %d\n",
id_,
pos.Value());
- UsePosition* use_pos = new(zone) UsePosition(pos, operand);
+ UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
+ UsePosition* prev_hint = NULL;
UsePosition* prev = NULL;
UsePosition* current = first_pos_;
while (current != NULL && current->pos().Value() < pos.Value()) {
+ prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
current = current->next();
}
@@ -471,7 +470,9 @@ UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
prev->next_ = use_pos;
}
- return use_pos;
+ if (prev_hint == NULL && use_pos->HasHint()) {
+ current_hint_operand_ = hint;
+ }
}
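
The point of current_hint_operand_ is to replace a linear walk over the use-position chain (the deleted FirstPosWithHint) with a cached field that AddUsePosition keeps up to date as positions are inserted. The walk it replaces looks roughly like this, with stand-in types:

    struct UsePos {
      const char* hint;  // stand-in for LOperand*
      UsePos* next;
    };

    // What LiveRange::FirstPosWithHint used to compute on every query.
    const char* FirstHint(const UsePos* first) {
      for (const UsePos* p = first; p != nullptr; p = p->next) {
        if (p->hint != nullptr) return p->hint;
      }
      return nullptr;
    }
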
@@ -624,13 +625,13 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
bool is_tagged) {
TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
ASSERT(operand->HasFixedPolicy());
- if (operand->policy() == LUnallocated::FIXED_SLOT) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
- } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
- int reg_index = operand->fixed_index();
+ if (operand->HasFixedSlotPolicy()) {
+ operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
- int reg_index = operand->fixed_index();
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
} else {
UNREACHABLE();
@@ -725,14 +726,14 @@ void LAllocator::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start().Value() > position.Value()) {
// Can happen if there is a definition without use.
range->AddUseInterval(position, position.NextInstruction(), zone_);
- range->AddUsePosition(position.NextInstruction(), NULL, zone_);
+ range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone_);
} else {
range->ShortenTo(position);
}
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
}
@@ -745,7 +746,7 @@ void LAllocator::Use(LifetimePosition block_start,
if (range == NULL) return;
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
range->AddUseInterval(block_start, position, zone_);
}
@@ -845,7 +846,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+ } else if (cur_input->HasWritableRegisterPolicy()) {
// The live range of writable input registers always goes until the end
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
@@ -924,7 +925,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (phi != NULL) {
// This is a phi resolving move.
if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->FirstHint();
+ hint = LiveRangeFor(phi->id())->current_hint_operand();
}
} else {
if (to->IsUnallocated()) {
@@ -1812,26 +1813,23 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
- UsePosition* hinted_use = current->FirstPosWithHint();
- if (hinted_use != NULL) {
- LOperand* hint = hinted_use->hint();
- if (hint->IsRegister() || hint->IsDoubleRegister()) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
- return true;
- }
+ LOperand* hint = current->FirstHint();
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ int register_index = hint->index();
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index),
+ free_until_pos[register_index].Value(),
+ current->id(),
+ current->End().Value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index),
+ current->id());
+ SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
+ return true;
}
}
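
With current_hint_operand_ guaranteed to mirror FirstHint(), TryAllocateFreeReg no longer needs to walk the use list for a hinted position; it reads the hint operand and takes the preferred register whenever it stays free past the end of the current range. A hedged sketch of that fast path, under an assumed free_until bookkeeping array (not V8 code):

#include <algorithm>

const int kNumRegisters = 8;

// free_until[r] holds the first position at which register r stops being
// free. Returns the register chosen for a range ending at range_end.
int AllocateWithHint(int hinted_reg, int range_end,
                     const int free_until[kNumRegisters]) {
  // Fast path: the hinted register covers the whole range, so take it.
  if (hinted_reg >= 0 && free_until[hinted_reg] >= range_end) {
    return hinted_reg;
  }
  // Otherwise fall back to the register that stays free the longest.
  const int* best = std::max_element(free_until, free_until + kNumRegisters);
  return static_cast<int>(best - free_until);
}
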
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 8b45531d9..552ebdd1d 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -244,13 +244,12 @@ class UseInterval: public ZoneObject {
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
- UsePosition(LifetimePosition pos, LOperand* operand);
+ UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
- void set_hint(LOperand* hint) { hint_ = hint; }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@@ -261,9 +260,9 @@ class UsePosition: public ZoneObject {
private:
void set_next(UsePosition* next) { next_ = next; }
- LOperand* operand_;
- LOperand* hint_;
- LifetimePosition pos_;
+ LOperand* const operand_;
+ LOperand* const hint_;
+ LifetimePosition const pos_;
UsePosition* next_;
bool requires_reg_;
bool register_beneficial_;
@@ -329,10 +328,14 @@ class LiveRange: public ZoneObject {
return assigned_register_ != kInvalidAssignment;
}
bool IsSpilled() const { return spilled_; }
- UsePosition* FirstPosWithHint() const;
+ LOperand* current_hint_operand() const {
+ ASSERT(current_hint_operand_ == FirstHint());
+ return current_hint_operand_;
+ }
LOperand* FirstHint() const {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos_;
+ while (pos != NULL && !pos->HasHint()) pos = pos->next();
if (pos != NULL) return pos->hint();
return NULL;
}
@@ -367,9 +370,10 @@ class LiveRange: public ZoneObject {
void AddUseInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
- UsePosition* AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone);
+ void AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);
@@ -398,6 +402,8 @@ class LiveRange: public ZoneObject {
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
UsePosition* last_processed_use_;
+  // This is used as a cache; it's invalid outside of BuildLiveRanges.
+ LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
};
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 10d7f7133..539f4eefb 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -58,24 +58,27 @@ void LOperand::PrintTo(StringStream* stream) {
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
+ if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
+ stream->Add("(=%dS)", unalloc->fixed_slot_index());
+ break;
+ }
+ switch (unalloc->extended_policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
+ Register::AllocationIndexToString(reg_index);
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+ DoubleRegister::AllocationIndexToString(reg_index);
stream->Add("(=%s)", double_register_name);
break;
}
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 24182747e..388f5658d 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -92,12 +92,16 @@ class LOperand: public ZoneObject {
class LUnallocated: public LOperand {
public:
- enum Policy {
+ enum BasicPolicy {
+ FIXED_SLOT,
+ EXTENDED_POLICY
+ };
+
+ enum ExtendedPolicy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
@@ -117,99 +121,152 @@ class LUnallocated: public LOperand {
USED_AT_END
};
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
+ explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
}
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
+ LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_SLOT);
+ value_ |= BasicPolicyField::encode(policy);
+ value_ |= index << FixedSlotIndexField::kShift;
+ ASSERT(this->fixed_slot_index() == index);
}
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
+ LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= FixedRegisterField::encode(index);
}
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 3;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 15;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
- static const int kFixedIndexWidth = 32 - kFixedIndexShift;
- STATIC_ASSERT(kFixedIndexWidth > 5);
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
+ LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
+ : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ }
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
+ LUnallocated* CopyUnconstrained(Zone* zone) {
+ LUnallocated* result = new(zone) LUnallocated(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
- static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
- static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
+ static LUnallocated* cast(LOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return reinterpret_cast<LUnallocated*>(op);
+ }
+ // The encoding used for LUnallocated operands depends on the policy that is
+ // stored within the operand. The FIXED_SLOT policy uses a compact encoding
+  // because it accommodates a larger payload.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------+
+ // | slot_index | vreg | 0 | 001 |
+ // +------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
+ // +------------------------------------------+ P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
+ // The superclass has a KindField.
+ STATIC_ASSERT(kKindFieldWidth == 3);
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+ class LifetimeField : public BitField<Lifetime, 25, 1> {};
+ class FixedRegisterField : public BitField<int, 26, 6> {};
+
+ static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+ static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+ static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+ static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+ // Predicates for the operand policy.
bool HasAnyPolicy() const {
- return policy() == ANY;
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == ANY;
}
bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
+ return basic_policy() == FIXED_SLOT ||
+ extended_policy() == FIXED_REGISTER ||
+ extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+ return basic_policy() == EXTENDED_POLICY && (
+ extended_policy() == WRITABLE_REGISTER ||
+ extended_policy() == MUST_HAVE_REGISTER);
}
bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == SAME_AS_FIRST_INPUT;
+ }
+ bool HasFixedSlotPolicy() const {
+ return basic_policy() == FIXED_SLOT;
+ }
+ bool HasFixedRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER;
}
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ = PolicyField::update(value_, policy);
+ bool HasFixedDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_DOUBLE_REGISTER;
}
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
+ bool HasWritableRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == WRITABLE_REGISTER;
}
- int virtual_register() const {
- return VirtualRegisterField::decode(value_);
+ // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+ BasicPolicy basic_policy() const {
+ return BasicPolicyField::decode(value_);
}
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
+ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+ ExtendedPolicy extended_policy() const {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return ExtendedPolicyField::decode(value_);
}
- LUnallocated* CopyUnconstrained(Zone* zone) {
- LUnallocated* result = new(zone) LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
+ // [fixed_slot_index]: Only for FIXED_SLOT.
+ int fixed_slot_index() const {
+ ASSERT(HasFixedSlotPolicy());
+ return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
}
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ int fixed_register_index() const {
+ ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ return FixedRegisterField::decode(value_);
}
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
+ // [virtual_register]: The virtual register ID for this operand.
+ int virtual_register() const {
+ return VirtualRegisterField::decode(value_);
+ }
+ void set_virtual_register(unsigned id) {
+ value_ = VirtualRegisterField::update(value_, id);
}
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
+ // [lifetime]: Only for non-FIXED_SLOT.
+ bool IsUsedAtStart() {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return LifetimeField::decode(value_) == USED_AT_START;
}
};
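
To make the layout diagram in the lithium.h hunk concrete, here is a small self-contained sketch of the FIXED_SLOT encoding; the bit positions follow the BitField declarations above (kind tag 001 in bits 0-2, basic policy in bit 3, virtual register in bits 4-21, slot index in the top 10 bits), but the helper functions are illustrative, not V8's BitField class:

#include <cassert>
#include <cstdint>

const int kVregShift = 4;
const int kSlotShift = 22;

uint32_t EncodeFixedSlot(int slot_index, unsigned vreg) {
  uint32_t value = 1;                     // kind tag 001, basic policy bit 0
  value |= vreg << kVregShift;            // bits 4-21
  value |= static_cast<uint32_t>(slot_index) << kSlotShift;  // signed payload
  return value;
}

int DecodeFixedSlot(uint32_t value) {
  // An arithmetic right shift recovers the sign, which is why the slot
  // index is decoded manually instead of through the BitField class.
  return static_cast<int32_t>(value) >> kSlotShift;
}

unsigned DecodeVreg(uint32_t value) {
  return (value >> kVregShift) & ((1u << 18) - 1);   // bits 4-21
}

int main() {
  uint32_t op = EncodeFixedSlot(-3, 1234);
  assert(DecodeFixedSlot(op) == -3);                 // sign survives
  assert(DecodeVreg(op) == 1234);
  return 0;
}
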
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 0c52f3822..643d6c770 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -116,7 +116,7 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === '__ArrayBuffer');
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 15a39b7b8..b9bce1ebd 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -101,16 +101,24 @@ var kMessages = {
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
+ parameterless_typed_array_constr:
+ ["%0"," constructor should have at least one argument."],
+ not_typed_array: ["this is not a typed array."],
+ invalid_argument: ["invalid_argument"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
invalid_typed_array_offset: ["Start offset is too large"],
invalid_typed_array_length: ["Length is too large"],
invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ typed_array_set_source_too_large:
+ ["Source is too large"],
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
- unable_to_parse: ["Parse error"],
+ paren_in_arg_string: ["Function arg string contains parenthesis"],
+ not_isvar: ["builtin %IS_VAR: not a variable"],
+ single_function_literal: ["Single function literal required"],
invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
illegal_break: ["Illegal break statement"],
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 733c3694d..f1c2553d1 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
@@ -74,6 +75,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -116,9 +139,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// a0 -- number of arguments
+ // a1 -- function
// a2 -- type info cell with elements kind
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a1, a2 };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &a0;
@@ -4099,12 +4123,6 @@ Register InstanceofStub::left() { return a0; }
Register InstanceofStub::right() { return a1; }
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -5103,6 +5121,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(a3, &miss);
__ Branch(&miss, gt, a3, Operand(terminal_kind_sentinel));
// Make sure the function is the Array() function
__ LoadArrayFunction(a3);
@@ -6300,25 +6319,53 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ li(t0, Operand(high_promotion_mode));
+ __ lw(t0, MemOperand(t0, 0));
+ __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
+
+ __ mov(t3, v0);
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ RecordWriteField(t3,
+ ConsString::kFirstOffset,
+ a0,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ RecordWriteField(t3,
+ ConsString::kSecondOffset,
+ a1,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
__ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
+  // to contain only one-byte characters.
// t0: first instance type.
// t1: second instance type.
- // Branch to if _both_ instances have kAsciiDataHintMask set.
- __ And(at, t0, Operand(kAsciiDataHintMask));
+  // Branch to &ascii_data if _both_ instances have kOneByteDataHintMask set.
+ __ And(at, t0, Operand(kOneByteDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
__ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
+ __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
__ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kAsciiDataHintTag));
+ Operand(kOneByteStringTag | kOneByteDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
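
The write-barrier split added to StringAddStub::Generate above pairs with the AllocateAsciiConsString change later in this patch: when new_space_high_promotion_mode is active, the cons string is pretenured into old space, so storing its (possibly new-space) component strings must be recorded in the remembered set, while a plain new-space allocation can keep the bare stores. A hedged sketch of the control flow, with remembered-set recording reduced to a callback:

// Illustrative only; not V8 code.
struct Heap { bool high_promotion_mode_active; };

void FillConsString(Heap* heap, void** cons, void* first, void* second,
                    void (*record_write)(void** slot)) {
  cons[0] = first;
  cons[1] = second;
  if (heap->high_promotion_mode_active) {
    // The cons string may have been pretenured into old space, so stores
    // of possibly new-space strings must be recorded.
    record_write(&cons[0]);
    record_write(&cons[1]);
  }
  // In the new-space case the plain stores above are already safe.
}
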
@@ -7181,6 +7228,9 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
+ { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7703,13 +7753,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
- __ lw(a3, FieldMemOperand(a2, kPointerSize));
-
- // There is no info if the call site went megamorphic either
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ Branch(&no_info, eq, a3,
- Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(a3, &no_info);
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 3be1e4d8b..0ae01875e 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -236,6 +236,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-mips.cc).
// ----------- S t a t e -------------
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index a6fd39aa1..872af86a9 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -1592,7 +1592,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@@ -1943,11 +1944,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&resume, ne, result_register(), Operand(at));
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1959,18 +1961,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sw(a1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2057,7 +2048,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Subu(a3, a3, Operand(1));
__ Branch(&call_resume, lt, a3, Operand(zero_reg));
__ push(a2);
- __ b(&push_operand_holes);
+ __ Branch(&push_operand_holes);
__ bind(&call_resume);
__ push(a1);
__ push(result_register());
@@ -2076,6 +2067,56 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), a0, a2, a3, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ li(a1, Operand(map));
+ __ pop(a2);
+ __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ sw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ sw(t0, FieldMemOperand(a0, JSObject::kPropertiesOffset));
+ __ sw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ sw(a2,
+ FieldMemOperand(a0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ sw(a3,
+ FieldMemOperand(a0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(a0, JSGeneratorObject::kResultValuePropertyOffset,
+ a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ __ mov(result_register(), a0);
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ lw(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
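
EmitReturnIteratorResult materializes the { value, done } iterator result inline; the ASSERT_EQ(map->instance_size(), 5 * kPointerSize) pins the object to five pointer-sized fields, and only the value field can point into new space. A sketch of that layout with illustrative field names:

// Field names are illustrative; the offsets correspond to the five stores
// emitted above. sizeof(IteratorResult) == 5 * sizeof(void*), matching the
// ASSERT_EQ on the map's instance size.
struct IteratorResult {
  void* map;         // generator_result_map from the native context
  void* properties;  // empty_fixed_array
  void* elements;    // empty_fixed_array
  void* value;       // popped operand; the only field needing a write barrier
  void* done;        // canonical true/false object, part of the root set
};
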
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index e434fdb77..02055a448 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1440,6 +1440,25 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a2 : key
+ // -- a1 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a1, a2, a0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 0c2983f23..77e4216f1 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -91,6 +91,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+  for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -2684,13 +2688,20 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ldc1(result, FieldMemOperand(object, offset));
+ return;
+ }
+
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ __ lw(result, FieldMemOperand(object, offset));
} else {
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ __ lw(result, FieldMemOperand(result, offset));
}
}
@@ -2835,38 +2846,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
- __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
- __ Branch(&done, eq, scratch, Operand(at));
- // |scratch| still contains |input|'s map.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ Ext(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ Branch(&fail, lt, scratch,
- Operand(GetInitialFastElementsKind()));
- __ Branch(&done, le, scratch,
- Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ Branch(&fail, lt, scratch,
- Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ Branch(&done, le, scratch,
- Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3891,8 +3870,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -3919,15 +3897,34 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
Register scratch = scratch0();
int offset = instr->offset();
- ASSERT(!object.is(value));
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ Register value = ToRegister(instr->value());
+ __ SmiTagCheckOverflow(value, value, scratch);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ sdc1(value, FieldMemOperand(object, offset));
+ return;
+ }
- if (!instr->transition().is_null()) {
- __ li(scratch, Operand(instr->transition()));
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
+ __ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -3944,6 +3941,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -4811,6 +4810,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5150,7 +5151,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@@ -5159,8 +5159,16 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
@@ -5246,7 +5254,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index f082c01dd..a36059b53 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -56,6 +56,7 @@ class LCodeGen BASE_EMBEDDED {
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -416,6 +417,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index c2f89867d..28309e2df 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -552,6 +552,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -672,7 +677,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -1301,8 +1306,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1397,15 +1402,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
+ left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@@ -1470,8 +1475,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1502,8 +1507,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1988,8 +1993,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2024,12 +2029,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2194,14 +2193,25 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2253,7 +2263,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
@@ -2315,7 +2327,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index cfca64452..bb8c9930c 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -122,7 +122,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1521,18 +1520,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@@ -2089,6 +2076,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2722,6 +2712,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 220d9fe0c..81e9ec980 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3135,8 +3135,34 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ li(scratch1, Operand(high_promotion_mode));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
+
InitializeNewString(result,
length,
Heap::kConsAsciiStringMapRootIndex,
@@ -5135,6 +5161,18 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ li(scratch, Operand(map));
+ lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index e914f2402..248e5b4bc 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -325,6 +325,10 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index e110c47c6..80ab31a5e 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -307,11 +307,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -442,8 +444,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// a0 : value.
Label exit;
@@ -456,6 +460,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -471,7 +484,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -490,6 +503,30 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ mtc1(scratch1, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_restore_name, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
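
When the transition target gives the field a double representation, the block above boxes the incoming tagged value into the freshly allocated heap number: a smi is untagged and converted via mtc1/cvt_d_w, while an existing heap number's payload is loaded directly with ldc1. A hedged C-level sketch of that normalization, assuming 32-bit smi tagging with a clear low bit:

#include <cstdint>

// Assumes 32-bit smi tagging: a clear low bit marks a smi whose payload
// sits in the upper 31 bits; anything else points at a heap number.
double NormalizeToDouble(intptr_t tagged, const double* heap_number_value) {
  if ((tagged & 1) == 0) {
    // Smi path: untag, then integer -> double (the mtc1/cvt_d_w pair).
    return static_cast<double>(tagged >> 1);
  }
  // Heap-number path: the payload is already a double (the ldc1 load).
  return *heap_number_value;
}
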
@@ -517,7 +554,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -535,40 +572,61 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ }
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register v0).
@@ -615,24 +673,64 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ mtc1(scratch2, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ mov(v0, a0);
+ __ Ret();
+ return;
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -641,18 +739,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register v0).
@@ -1269,9 +1369,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1501,7 +1612,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
@@ -2935,18 +3047,24 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(receiver_maps->at(current)));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, map_reg, Operand(receiver_maps->at(current)));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
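The polymorphic IC above now emits a map check only for maps that are not deprecated. An illustrative JS sketch of the kind of load site it serves (map identities and IC states are internal, so this is behavioral only):

  function getX(o) { return o.x; }
  getX({x: 1, y: 2});  // shape A: the load IC starts monomorphic on A's map
  getX({x: 3, z: 4});  // shape B: the IC goes polymorphic over {A, B}
  // If A is later deprecated (e.g. by a field generalization), a recompiled
  // handler omits A's map check entirely and A-shaped receivers fall through
  // to the miss handler.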
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index abfe69397..7592a8953 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -32,6 +32,10 @@
#endif
#include <signal.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "bootstrapper.h"
@@ -291,6 +295,18 @@ class BZip2Decompressor : public StartupDataDecompressor {
#endif
+void DumpException(Handle<Message> message) {
+ String::Utf8Value message_string(message->Get());
+ String::Utf8Value message_line(message->GetSourceLine());
+ fprintf(stderr, "%s at line %d\n", *message_string, message->GetLineNumber());
+ fprintf(stderr, "%s\n", *message_line);
+ for (int i = 0; i <= message->GetEndColumn(); ++i) {
+ fprintf(stderr, "%c", i < message->GetStartColumn() ? ' ' : '^');
+ }
+ fprintf(stderr, "\n");
+}
+
+
int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -312,13 +328,18 @@ int main(int argc, char** argv) {
}
#endif
i::Serializer::Enable();
- Persistent<Context> context = v8::Context::New();
+ Isolate* isolate = Isolate::GetCurrent();
+ Persistent<Context> context;
+ {
+ HandleScope handle_scope(isolate);
+ context.Reset(isolate, Context::New(isolate));
+ }
+
if (context.IsEmpty()) {
fprintf(stderr,
"\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
- Isolate* isolate = context->GetIsolate();
if (i::FLAG_extra_code != NULL) {
context->Enter();
// Capture 100 frames if anything happens.
@@ -350,27 +371,14 @@ int main(int argc, char** argv) {
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+ fprintf(stderr, "Failure compiling '%s'\n", name);
+ DumpException(try_catch.Message());
exit(1);
}
script->Run();
if (try_catch.HasCaught()) {
fprintf(stderr, "Failure running '%s'\n", name);
- Local<Message> message = try_catch.Message();
- Local<String> message_string = message->Get();
- Local<String> message_line = message->GetSourceLine();
- int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
- char* buf = new char(len);
- message_string->WriteUtf8(buf);
- fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
- message_line->WriteUtf8(buf);
- fprintf(stderr, "%s\n", buf);
- int from = message->GetStartColumn();
- int to = message->GetEndColumn();
- int i;
- for (i = 0; i < from; i++) fprintf(stderr, " ");
- for ( ; i <= to; i++) fprintf(stderr, "^");
- fprintf(stderr, "\n");
+ DumpException(try_catch.Message());
exit(1);
}
context->Exit();
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index bfb4a6545..77409b957 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -138,7 +138,9 @@ function NotifyChange(type, object, name, oldValue) {
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
- ObjectFreeze(changeRecord);
+ // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
+ // slow.
+ // ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -164,7 +166,9 @@ function ObjectNotifierNotify(changeRecord) {
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
READ_ONLY + DONT_DELETE);
}
- ObjectFreeze(newRecord);
+ // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
+ // slow.
+ // ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
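With the freeze calls commented out, delivered change records are observably mutable, which is exactly the spec deviation the TODOs flag. A sketch, assuming Object.observe is enabled (it sat behind a harmony flag at the time):

  var records;
  var obj = {};
  Object.observe(obj, function(r) { records = r; });
  obj.x = 1;
  // After delivery: per spec, records[0] would be frozen; with the freeze
  // disabled, Object.isFrozen(records[0]) is false and observers can modify
  // the record.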
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index ee6df1d2a..ecbf9d642 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -323,10 +323,6 @@ void Map::MapVerify() {
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
- DescriptorArray* descriptors = instance_descriptors();
- for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
- }
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
if (HasTransitionArray()) {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
@@ -419,6 +415,7 @@ void JSGeneratorObject::JSGeneratorObjectVerify() {
VerifyObjectField(kReceiverOffset);
VerifyObjectField(kOperandStackOffset);
VerifyObjectField(kContinuationOffset);
+ VerifyObjectField(kStackHandlerIndexOffset);
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 08378f195..06a13df5a 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -283,6 +283,16 @@ bool Object::HasValidElements() {
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
}
+
+MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
+ Representation representation,
+ PretenureFlag tenure) {
+ if (!FLAG_track_double_fields) return this;
+ if (!representation.IsDouble()) return this;
+ return heap->AllocateHeapNumber(Number(), tenure);
+}
+
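AllocateNewStorageFor allocates only when the target representation is Double; smi and tagged values keep living directly in the field slot. A behavioral sketch, assuming the field-tracking flags are on:

  var o = {count: 1};  // 'count' tracked as Smi: the slot holds the smi itself
  o.ratio = 0.25;      // Double representation: a HeapNumber box is allocated
                       // and the slot points at the box
  o.count = 2;         // still fits Smi: no allocation, the slot is rewritten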
+
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@@ -357,12 +367,8 @@ bool String::IsTwoByteRepresentationUnderneath() {
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
- return (type & kOneByteDataHintMask) == kOneByteDataHintTag;
-}
-
-
-bool String::IsOneByteConvertible() {
- return HasOnlyOneByteChars() || IsOneByteRepresentation();
+ return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
+ IsOneByteRepresentation();
}
@@ -1513,22 +1519,7 @@ MaybeObject* JSObject::ResetElements() {
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
- ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
- map->NumberOfOwnDescriptors());
- if (this->map()->unused_property_fields() == 0) {
- int new_size = properties()->length() + map->unused_property_fields() + 1;
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(new_size);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
-MaybeObject* JSObject::TransitionToMap(Map* map) {
+MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
ElementsKind expected_kind = this->map()->elements_kind();
if (map->elements_kind() != expected_kind) {
@@ -1549,6 +1540,14 @@ MaybeObject* JSObject::TransitionToMap(Map* map) {
}
+MaybeObject* JSObject::MigrateInstance() {
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ return GeneralizeFieldRepresentation(0, Representation::Smi());
+}
+
+
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
AssertNoAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@@ -1710,10 +1709,17 @@ void JSObject::SetInternalField(int index, Smi* value) {
}
+MaybeObject* JSObject::FastPropertyAt(Representation representation,
+ int index) {
+ Object* raw_value = RawFastPropertyAt(index);
+ return raw_value->AllocateNewStorageFor(GetHeap(), representation);
+}
+
+
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object* JSObject::FastPropertyAt(int index) {
+Object* JSObject::RawFastPropertyAt(int index) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1726,7 +1732,7 @@ Object* JSObject::FastPropertyAt(int index) {
}
-Object* JSObject::FastPropertyAtPut(int index, Object* value) {
+void JSObject::FastPropertyAtPut(int index, Object* value) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1737,7 +1743,6 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
ASSERT(index < properties()->length());
properties()->set(index, value);
}
- return value;
}
@@ -2277,6 +2282,23 @@ void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
}
+void DescriptorArray::SetRepresentation(int descriptor_index,
+ Representation representation) {
+ ASSERT(!representation.IsNone());
+ PropertyDetails details = GetDetails(descriptor_index);
+ set(ToDetailsIndex(descriptor_index),
+ details.CopyWithRepresentation(representation).AsSmi());
+}
+
+
+void DescriptorArray::InitializeRepresentations(Representation representation) {
+ int length = number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ SetRepresentation(i, representation);
+ }
+}
+
+
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@@ -2338,10 +2360,8 @@ void DescriptorArray::Set(int descriptor_number,
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
@@ -2357,9 +2377,7 @@ void DescriptorArray::Set(int descriptor_number,
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@@ -2370,9 +2388,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
void DescriptorArray::Append(Descriptor* desc,
const WhitenessWitness& witness) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc, witness);
uint32_t hash = desc->GetKey()->Hash();
@@ -2391,9 +2407,7 @@ void DescriptorArray::Append(Descriptor* desc,
void DescriptorArray::Append(Descriptor* desc) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc);
uint32_t hash = desc->GetKey()->Hash();
@@ -3573,6 +3587,38 @@ bool Map::is_observed() {
}
+void Map::deprecate() {
+ set_bit_field3(Deprecated::update(bit_field3(), true));
+}
+
+
+bool Map::is_deprecated() {
+ if (!FLAG_track_fields) return false;
+ return Deprecated::decode(bit_field3());
+}
+
+
+bool Map::CanBeDeprecated() {
+ int descriptor = LastAdded();
+ for (int i = 0; i <= descriptor; i++) {
+ PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (FLAG_track_fields && details.representation().IsSmi()) {
+ return true;
+ }
+ if (FLAG_track_double_fields && details.representation().IsDouble()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+ return GeneralizeRepresentation(map, 0, Representation::Smi());
+}
+
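CanBeDeprecated answers whether any field of the map is tracked with a representation (smi or double) that a later store could generalize away. A sketch of deprecation and lazy migration, assuming the tracking flags:

  var a = {x: 1};
  var b = {x: 2};  // a and b share one map; 'x' is tracked as Smi
  b.x = 1.5;       // 'x' generalizes to Double; the old map is deprecated
  // a still carries the deprecated map. CurrentMapForDeprecated resolves it
  // to the generalized map, and a is actually rewritten the next time it is
  // touched, e.g. on its next property store.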
+
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
@@ -4109,23 +4155,6 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
void Map::InitializeDescriptors(DescriptorArray* descriptors) {
int len = descriptors->number_of_descriptors();
-#ifdef DEBUG
- ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
-
- bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
- for (int i = 0; i < len; ++i) used_indices[i] = false;
-
- // Ensure that all enumeration indexes between 1 and length occur uniquely in
- // the descriptor array.
- for (int i = 0; i < len; ++i) {
- int enum_index = descriptors->GetDetails(i).descriptor_index() -
- PropertyDetails::kInitialIndex;
- ASSERT(0 <= enum_index && enum_index < len);
- ASSERT(!used_indices[enum_index]);
- used_indices[enum_index] = true;
- }
-#endif
-
set_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(len);
}
@@ -5095,6 +5124,7 @@ ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 5aeeec656..0849a630c 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -271,7 +271,7 @@ void JSObject::PrintProperties(FILE* out) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
- FastPropertyAt(index)->ShortPrint(out);
+ RawFastPropertyAt(index)->ShortPrint(out);
PrintF(out, " (field at offset %d)\n", index);
break;
}
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 6a64cbf80..add247ea4 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -571,6 +571,12 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
+ // If this is a native function we do not flush the code because %SetCode
+ // breaks the one-to-one relation between SharedFunctionInfo and Code.
+ if (shared_info->native()) {
+ return false;
+ }
+
if (FLAG_age_code) {
return shared_info->code()->IsOld();
} else {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 128c04da4..d127d1bb8 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -815,11 +815,14 @@ MaybeObject* Object::GetProperty(Object* receiver,
value = result->holder()->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD:
- value = result->holder()->FastPropertyAt(
+ case FIELD: {
+ MaybeObject* maybe_result = result->holder()->FastPropertyAt(
+ result->representation(),
result->GetFieldIndex().field_index());
+ if (!maybe_result->To(&value)) return maybe_result;
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
+ }
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
@@ -1711,18 +1714,29 @@ String* JSReceiver::constructor_name() {
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
Name* name,
Object* value,
- int field_index) {
+ int field_index,
+ Representation representation) {
+ // This method is used to transition to a field. If we are transitioning to a
+ // double field, allocate new storage.
+ Object* storage;
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(GetHeap(), representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
if (map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
FixedArray* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
- }
+ MaybeObject* maybe_values =
+ properties()->CopySize(properties()->length() + new_unused + 1);
+ if (!maybe_values->To(&values)) return maybe_values;
+
set_properties(values);
}
+
set_map(new_map);
- return FastPropertyAtPut(field_index, value);
+
+ FastPropertyAtPut(field_index, storage);
+ return value;
}
@@ -1774,7 +1788,8 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes, 0);
+ Representation representation = value->OptimalRepresentation();
+ FieldDescriptor new_field(name, index, attributes, representation);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
@@ -1782,6 +1797,7 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
FixedArray* values = NULL;
+ // TODO(verwaest): Merge with AddFastPropertyUsingMap.
if (map()->unused_property_fields() == 0) {
// Make room for the new value
MaybeObject* maybe_values =
@@ -1791,10 +1807,17 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
TransitionFlag flag = INSERT_TRANSITION;
+ Heap* heap = isolate->heap();
+
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Object* storage;
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
if (map()->unused_property_fields() == 0) {
ASSERT(values != NULL);
set_properties(values);
@@ -1804,7 +1827,9 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
}
set_map(new_map);
- return FastPropertyAtPut(index, value);
+
+ FastPropertyAtPut(index, storage);
+ return value;
}
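The initial representation of a freshly added fast field comes from the first value stored, via OptimalRepresentation. A sketch, assuming the tracking flags:

  var o = {};
  o.i = 7;     // first value is a smi    -> field tracked as Smi
  o.d = 1.5;   // first value is a double -> field tracked as Double
  o.s = "hi";  // anything else           -> field tracked as Tagged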
@@ -1813,7 +1838,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
JSFunction* function,
PropertyAttributes attributes) {
// Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes, 0);
+ ConstantFunctionDescriptor d(name, function, attributes);
TransitionFlag flag =
// Do not add transitions to global objects.
@@ -1861,7 +1886,7 @@ MaybeObject* JSObject::AddSlowProperty(Name* name,
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
}
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
Object* result;
{ MaybeObject* maybe_result = dict->Add(name, store_value, details);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2028,7 +2053,6 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition(
// TODO(verwaest): From here on we lose existing map transitions, causing
// invalid back pointers. This will change once we can store multiple
// transitions with the same key.
-
bool owned_descriptors = old_map->owns_descriptors();
if (owned_descriptors ||
old_target->instance_descriptors() == old_map->instance_descriptors()) {
@@ -2049,6 +2073,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition(
old_map->set_owns_descriptors(false);
}
+ old_target->DeprecateTransitionTree();
+
old_map->SetTransition(transition_index, new_map);
new_map->SetBackPointer(old_map);
return result;
@@ -2066,8 +2092,9 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
return ReplaceSlowProperty(name, new_value, attributes);
}
+ Representation representation = new_value->OptimalRepresentation();
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, 0);
+ FieldDescriptor new_field(name, index, attributes, representation);
// Make a new map for the object.
Map* new_map;
@@ -2085,6 +2112,12 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
}
+ Heap* heap = GetHeap();
+ Object* storage;
+ MaybeObject* maybe_storage =
+ new_value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
// Update pointers to commit changes.
// Object points to the new map.
new_map->set_unused_property_fields(new_unused_property_fields);
@@ -2092,10 +2125,463 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
if (new_properties != NULL) {
set_properties(new_properties);
}
- return FastPropertyAtPut(index, new_value);
+ FastPropertyAtPut(index, new_value);
+ return new_value;
+}
+
+
+const char* Representation::Mnemonic() const {
+ switch (kind_) {
+ case kNone: return "v";
+ case kTagged: return "t";
+ case kSmi: return "s";
+ case kDouble: return "d";
+ case kInteger32: return "i";
+ case kExternal: return "x";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ zap++; // Header of filler must be at least one word so skip that.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+}
+
+
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ ASSERT(!HEAP->lo_space()->Contains(elms));
+
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ ZapEndOfFixedArray(new_end, to_trim);
+ }
+
+ int size_delta = to_trim * kPointerSize;
+
+ // Technically in new space this write might be omitted (except for
+ // debug mode which iterates through the heap), but to play safer
+ // we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+}
+
+
+bool Map::InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
+ int target_inobject,
+ int target_unused) {
+ // If fields were added (or removed), rewrite the instance.
+ int number_of_fields = NumberOfFields();
+ ASSERT(target_number_of_fields >= number_of_fields);
+ if (target_number_of_fields != number_of_fields) return true;
+
+ if (FLAG_track_double_fields) {
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ old_desc->GetDetails(i).representation().IsSmi()) {
+ return true;
+ }
+ }
+ }
+
+ // If no fields were added, and no inobject properties were removed, setting
+ // the map is sufficient.
+ if (target_inobject == inobject_properties()) return false;
+ // In-object slack tracking may have reduced the object size of the new map.
+ // In that case, succeed if all existing fields were inobject, and they still
+ // fit within the new inobject size.
+ ASSERT(target_inobject < inobject_properties());
+ if (target_number_of_fields <= target_inobject) {
+ ASSERT(target_number_of_fields + target_unused == target_inobject);
+ return false;
+ }
+ // Otherwise, properties will need to be moved to the backing store.
+ return true;
+}
+
+
+// To migrate an instance to a map:
+// - First check whether the instance needs to be rewritten. If not, simply
+// change the map.
+// - Otherwise, allocate a fixed array large enough to hold all fields, in
+// addition to unused space.
+// - Copy all existing properties in, in the following order: backing store
+// properties, unused fields, inobject properties.
+// - If all allocation succeeded, commit the state atomically:
+// * Copy inobject properties from the backing store back into the object.
+// * Trim the difference in instance size of the object. This also cleanly
+// frees inobject properties that moved to the backing store.
+// * If there are properties left in the backing store, trim off the space used
+// to temporarily store the inobject properties.
+// * If there are properties left in the backing store, install the backing
+// store.
+MaybeObject* JSObject::MigrateToMap(Map* new_map) {
+ Heap* heap = GetHeap();
+ Map* old_map = map();
+ int number_of_fields = new_map->NumberOfFields();
+ int inobject = new_map->inobject_properties();
+ int unused = new_map->unused_property_fields();
+
+ // Nothing to do if no functions were converted to fields.
+ if (!old_map->InstancesNeedRewriting(
+ new_map, number_of_fields, inobject, unused)) {
+ set_map(new_map);
+ return this;
+ }
+
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+ FixedArray* array;
+ MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
+ if (!maybe_array->To(&array)) return maybe_array;
+
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ int descriptors = new_map->NumberOfOwnDescriptors();
+
+ for (int i = 0; i < descriptors; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ ASSERT(old_details.type() == CONSTANT_FUNCTION ||
+ old_details.type() == FIELD);
+ Object* value = old_details.type() == CONSTANT_FUNCTION
+ ? old_descriptors->GetValue(i)
+ : RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ if (FLAG_track_double_fields &&
+ old_details.representation().IsSmi() &&
+ details.representation().IsDouble()) {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to migrate the instance.
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, details.representation(), TENURED);
+ if (!maybe_storage->To(&value)) return maybe_storage;
+ }
+ ASSERT(!(FLAG_track_double_fields &&
+ details.representation().IsDouble() &&
+ value->IsSmi()));
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ array->set(target_index, value);
+ }
+
+ // From here on we cannot fail anymore.
+
+ // Copy (real) inobject properties. If necessary, stop at number_of_fields to
+ // avoid overwriting |one_pointer_filler_map|.
+ int limit = Min(inobject, number_of_fields);
+ for (int i = 0; i < limit; i++) {
+ FastPropertyAtPut(i, array->get(external + i));
+ }
+
+ // Create filler object past the new instance size.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = old_map->instance_size() - new_instance_size;
+ ASSERT(instance_size_delta >= 0);
+ Address address = this->address() + new_instance_size;
+ heap->CreateFillerObjectAt(address, instance_size_delta);
+
+ // If there are properties in the new backing store, trim it to the correct
+ // size and install the backing store into the object.
+ if (external > 0) {
+ RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
+ set_properties(array);
+ }
+
+ set_map(new_map);
+
+ return this;
+}
+
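MigrateToMap rewrites the instance only when InstancesNeedRewriting says it must; a smi-to-double generalization is the interesting case, since a slot that used to hold a raw smi must end up holding a HeapNumber box. A sketch of a rewrite that also involves the properties backing store:

  var o = {};
  for (var i = 0; i < 40; i++) o['p' + i] = i;  // later fields spill out of
                                                // the object into the backing
                                                // store
  o.p3 = 2.5;  // p3 generalizes Smi -> Double: all fields are copied into a
               // freshly allocated array (boxing p3), and only then is the
               // new state committed, as the comment above MigrateToMap
               // describes.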
+
+MaybeObject* JSObject::GeneralizeFieldRepresentation(
+ int modify_index,
+ Representation new_representation) {
+ Map* new_map;
+ MaybeObject* maybe_new_map =
+ map()->GeneralizeRepresentation(modify_index, new_representation);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ ASSERT(map() != new_map || new_map->FindRootMap()->is_deprecated());
+
+ return MigrateToMap(new_map);
+}
+
+
+int Map::NumberOfFields() {
+ DescriptorArray* descriptors = instance_descriptors();
+ int result = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).type() == FIELD) result++;
+ }
+ return result;
+}
+
+
+MaybeObject* Map::CopyGeneralizeAllRepresentations() {
+ Map* new_map;
+ MaybeObject* maybe_map = this->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
+ new_map->instance_descriptors()->InitializeRepresentations(
+ Representation::Tagged());
+ if (FLAG_trace_generalization) {
+ PrintF("failed generalization %p -> %p\n",
+ static_cast<void*>(this), static_cast<void*>(new_map));
+ }
+ return new_map;
+}
+
+
+void Map::DeprecateTransitionTree() {
+ if (!FLAG_track_fields) return;
+ if (is_deprecated()) return;
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ transitions->GetTarget(i)->DeprecateTransitionTree();
+ }
+ }
+ deprecate();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kTransitionGroup);
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kPrototypeCheckGroup);
+}
+
+
+// Invalidates a transition target at |key|, and installs |new_descriptors| over
+// the current instance_descriptors to ensure proper sharing of descriptor
+// arrays.
+void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ int transition = transitions->Search(key);
+ if (transition != TransitionArray::kNotFound) {
+ transitions->GetTarget(transition)->DeprecateTransitionTree();
+ }
+ }
+
+ // Don't overwrite the empty descriptor array.
+ if (NumberOfOwnDescriptors() == 0) return;
+
+ DescriptorArray* to_replace = instance_descriptors();
+ Map* current = this;
+ while (current->instance_descriptors() == to_replace) {
+ current->SetEnumLength(Map::kInvalidEnumCache);
+ current->set_instance_descriptors(new_descriptors);
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break;
+ current = Map::cast(next);
+ }
+
+ set_owns_descriptors(false);
+}
+
+
+Map* Map::FindRootMap() {
+ Map* result = this;
+ while (true) {
+ Object* back = result->GetBackPointer();
+ if (back->IsUndefined()) return result;
+ result = Map::cast(back);
+ }
+}
+
+
+Map* Map::FindUpdatedMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+ current = transitions->GetTarget(transition);
+ }
+
+ return current;
+}
+
+
+Map* Map::FindLastMatchMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+
+ Map* next = transitions->GetTarget(transition);
+ DescriptorArray* next_descriptors = next->instance_descriptors();
+
+ if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break;
+
+ PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails next_details = next_descriptors->GetDetails(i);
+ if (details.type() != next_details.type()) break;
+ if (details.attributes() != next_details.attributes()) break;
+ if (!details.representation().Equals(next_details.representation())) break;
+ ASSERT(!details.IsDeleted());
+ ASSERT(!next_details.IsDeleted());
+
+ current = next;
+ }
+ return current;
}
+// Generalize the representation of the descriptor at |modify_index|.
+// This method rewrites the transition tree to reflect the new change. To avoid
+// high degrees of polymorphism, and to stabilize quickly, on every rewrite
+// the new type is deduced by merging the current type with any potential new
+// (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find |updated|, the newest matching version of this map using
+// FindUpdatedMap. This uses the keys in the own map's descriptor array to
+// walk the transition tree.
+// - Merge/generalize the descriptor array of the current map and |updated|.
+// - Generalize the |modify_index| descriptor using |new_representation|.
+// - Walk the tree again starting from the root towards |updated|. Stop at
+// |split_map|, the first map whose descriptor array does not match the merged
+// descriptor array.
+// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
+// - Otherwise, invalidate the outdated transition target from |updated|, and
+// replace its transition tree with a new branch for the updated descriptors.
+MaybeObject* Map::GeneralizeRepresentation(int modify_index,
+ Representation new_representation) {
+ Map* old_map = this;
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ Representation old_representation =
+ old_descriptors->GetDetails(modify_index).representation();
+
+ if (old_representation.IsNone()) {
+ UNREACHABLE();
+ old_descriptors->SetRepresentation(modify_index, new_representation);
+ return this;
+ }
+
+ int descriptors = old_map->NumberOfOwnDescriptors();
+ Map* root_map = old_map->FindRootMap();
+
+ if (!old_map->EquivalentToForTransition(root_map)) {
+ return CopyGeneralizeAllRepresentations();
+ }
+
+ int verbatim = root_map->NumberOfOwnDescriptors();
+
+ Map* updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, old_descriptors);
+ // Check the state of the root map.
+ DescriptorArray* updated_descriptors = updated->instance_descriptors();
+
+ int valid = updated->NumberOfOwnDescriptors();
+ if (updated_descriptors->IsMoreGeneralThan(
+ verbatim, valid, descriptors, old_descriptors)) {
+ Representation updated_representation =
+ updated_descriptors->GetDetails(modify_index).representation();
+ if (new_representation.fits_into(updated_representation)) {
+ if (FLAG_trace_generalization) {
+ PrintF("migrating to existing map %p -> %p\n",
+ static_cast<void*>(this), static_cast<void*>(updated));
+ }
+ return updated;
+ }
+ }
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = updated_descriptors->Merge(
+ verbatim, valid, descriptors, old_descriptors);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ old_representation =
+ new_descriptors->GetDetails(modify_index).representation();
+ new_representation = new_representation.generalize(old_representation);
+ new_descriptors->SetRepresentation(modify_index, new_representation);
+
+ Map* split_map = root_map->FindLastMatchMap(
+ verbatim, descriptors, new_descriptors);
+
+ int split_descriptors = split_map->NumberOfOwnDescriptors();
+ // The equal case is ruled out above: if every old descriptor had matched,
+ // |updated_descriptors| would have been more general than |old_descriptors|
+ // and we would already have returned |updated|.
+ ASSERT(descriptors != split_descriptors);
+
+ int descriptor = split_descriptors;
+ split_map->DeprecateTarget(
+ old_descriptors->GetKey(descriptor), new_descriptors);
+
+ if (FLAG_trace_generalization) {
+ PrintF("migrating to new map %p -> %p (%i steps)\n",
+ static_cast<void*>(this),
+ static_cast<void*>(new_descriptors),
+ descriptors - descriptor);
+ }
+
+ Map* new_map = split_map;
+ // Add missing transitions.
+ for (; descriptor < descriptors; descriptor++) {
+ MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
+ descriptor, new_descriptors);
+ if (!maybe_map->To(&new_map)) {
+ // Create a handle for the last created map to ensure it stays alive
+ // during GC. Its descriptor array is too large, but it will be
+ // overwritten during retry anyway.
+ Handle<Map>(new_map);
+ return maybe_map;
+ }
+ }
+
+ new_map->set_owns_descriptors(true);
+ return new_map;
+}
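The net effect is that shapes sharing a transition prefix converge on one generalized branch instead of forking per representation. A sketch:

  var p = {a: 1};  p.b = 2;    // builds root -> a -> b, with 'b' tracked as Smi
  var q = {a: 1};  q.b = 0.5;  // reuses the same 'b' transition: the store
                               // generalizes 'b' to Double, deprecates the old
                               // target, and rebuilds the branch with merged
                               // descriptors; p migrates lazily later.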
+
MaybeObject* JSObject::SetPropertyWithInterceptor(
Name* name,
@@ -2391,55 +2877,6 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-}
-
-
-template<RightTrimMode trim_mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
- }
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
-}
-
-
void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
if (slack <= descriptors->NumberOfSlackDescriptors()) return;
@@ -2670,7 +3107,7 @@ void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
+ RawFastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -3103,18 +3540,32 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> map) {
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->AddFastPropertyUsingMap(*map));
+ object->AllocateStorageForMap(*map));
}
-void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ if (FLAG_trace_migration) {
+ PrintF("migrating instance %p (%p)\n",
+ static_cast<void*>(*object),
+ static_cast<void*>(object->map()));
+ }
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransitionToMap(*map));
+ object->MigrateInstance());
+}
+
+
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
+ int modify_index,
+ Representation representation) {
+ CALL_HEAP_FUNCTION(
+ map->GetIsolate(),
+ map->GeneralizeRepresentation(modify_index, representation),
+ Map);
}
@@ -3206,10 +3657,30 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
case NORMAL:
result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
- case FIELD:
- result = lookup->holder()->FastPropertyAtPut(
+ case FIELD: {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->GeneralizeFieldRepresentation(
+ lookup->GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
+ int descriptor = lookup->GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
+ }
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage =
+ HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
+ lookup->GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ result = *value;
+ break;
+ }
+ lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
+ result = *value;
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value == lookup->GetConstantFunction()) return *value;
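For a field already tracked as Double, a fitting store just overwrites the payload of the existing HeapNumber box; only a non-fitting value forces generalization first. A sketch:

  var o = {d: 0.1};  // 'd' is a Double field backed by a HeapNumber box
  o.d = 0.2;         // fits: the box's value is updated in place, no new map
  o.d = "x";         // does not fit: 'd' generalizes to Tagged before the
                     // store completes.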
@@ -3236,9 +3707,24 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ Representation representation = details.representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ DescriptorArray* desc = transition_map->instance_descriptors();
+ int descriptor = transition_map->LastAdded();
+ representation = desc->GetDetails(descriptor).representation();
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
+ transition_map, *name, *value, field_index, representation);
} else {
result = lookup->holder()->ConvertDescriptorToField(
*name, *value, attributes);
@@ -3368,14 +3854,32 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
result = self->SetNormalizedProperty(*name, *value, details);
break;
}
- case FIELD:
- result = self->FastPropertyAtPut(
- lookup.GetFieldIndex().field_index(), *value);
+ case FIELD: {
+ Representation representation = lookup.representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
+ lookup.GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = self->map()->instance_descriptors();
+ int descriptor = lookup.GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
+ }
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage =
+ HeapNumber::cast(self->RawFastPropertyAt(
+ lookup.GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ result = *value;
+ break;
+ }
+ self->FastPropertyAtPut(lookup.GetFieldIndex().field_index(), *value);
+ result = *value;
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value != lookup.GetConstantFunction()) {
@@ -3398,9 +3902,23 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ Representation representation = details.representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ DescriptorArray* desc = transition_map->instance_descriptors();
+ int descriptor = transition_map->LastAdded();
+ representation = desc->GetDetails(descriptor).representation();
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
+ transition_map, *name, *value, field_index, representation);
} else {
result = self->ConvertDescriptorToField(*name, *value, attributes);
}
@@ -3810,9 +4328,8 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT_FUNCTION: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), NORMAL, i + 1);
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -3820,10 +4337,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
break;
}
case FIELD: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- details.descriptor_index());
- Object* value = FastPropertyAt(descs->GetFieldIndex(i));
+ PropertyDetails d =
+ PropertyDetails(details.attributes(), NORMAL, i + 1);
+ Object* value = RawFastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
@@ -3831,9 +4347,10 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
case CALLBACKS: {
Object* value = descs->GetCallbacksObject(i);
- details = details.set_pointer(0);
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, details);
+ dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
@@ -3967,7 +4484,7 @@ MaybeObject* JSObject::NormalizeElements() {
ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
if (!value->IsTheHole()) {
Object* result;
MaybeObject* maybe_result =
@@ -4200,8 +4717,10 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- inline_value =
- this->FastPropertyAt(descriptors->GetFieldIndex(sorted_index));
+ MaybeObject* maybe_value = this->FastPropertyAt(
+ descriptors->GetDetails(sorted_index).representation(),
+ descriptors->GetFieldIndex(sorted_index));
+ if (!maybe_value->To(&inline_value)) return maybe_value;
} else {
inline_value = GetHeap()->undefined_value();
}
@@ -4270,8 +4789,7 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
- value);
+ FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), value);
return this;
}
}
@@ -4747,6 +5265,11 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return isolate->StackOverflow();
+ if (map()->is_deprecated()) {
+ MaybeObject* maybe_failure = MigrateInstance();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
Heap* heap = isolate->heap();
Object* result;
{ MaybeObject* maybe_result = heap->CopyJSObject(this);
@@ -4756,27 +5279,24 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
// Deep copy local properties.
if (copy->HasFastProperties()) {
- FixedArray* properties = copy->properties();
- for (int i = 0; i < properties->length(); i++) {
- Object* value = properties->get(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- properties->set(i, result);
- }
- }
- int nof = copy->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- Object* value = copy->InObjectPropertyAt(i);
+ DescriptorArray* descriptors = copy->map()->instance_descriptors();
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ Object* value = RawFastPropertyAt(index);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- copy->InObjectPropertyAtPut(i, result);
+ MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
+ if (!maybe_copy->To(&value)) return maybe_copy;
+ } else {
+ Representation representation = details.representation();
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&value)) return maybe_storage;
}
+ copy->FastPropertyAtPut(index, value);
}
} else {
{ MaybeObject* maybe_result =
@@ -4923,16 +5443,6 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
}
-int Map::PropertyIndexFor(Name* name) {
- DescriptorArray* descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
- }
- return -1;
-}
-
-
int Map::NextFreePropertyIndex() {
int max_index = -1;
int number_of_own_descriptors = NumberOfOwnDescriptors();
@@ -5046,8 +5556,9 @@ static bool UpdateGetterSetterInDictionary(
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(entry,
- PropertyDetails(attributes, CALLBACKS, index));
+ dictionary->DetailsAtPut(
+ entry,
+ PropertyDetails(attributes, CALLBACKS, index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -5208,7 +5719,7 @@ bool JSObject::CanSetCallback(Name* name) {
MaybeObject* JSObject::SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
SeededNumberDictionary* dictionary;
@@ -5266,7 +5777,7 @@ MaybeObject* JSObject::SetPropertyCallback(Name* name,
}
// Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
@@ -5610,7 +6121,14 @@ Object* JSObject::SlowReverseLookup(Object* value) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
- if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
+ Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
+ if (FLAG_track_double_fields &&
+ descs->GetDetails(i).representation().IsDouble()) {
+ ASSERT(property->IsHeapNumber());
+ if (value->IsNumber() && property->Number() == value->Number()) {
+ return descs->GetKey(i);
+ }
+ } else if (property == value) {
return descs->GetKey(i);
}
} else if (descs->GetType(i) == CONSTANT_FUNCTION) {
@@ -5640,6 +6158,7 @@ MaybeObject* Map::RawCopy(int instance_size) {
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ new_bit_field3 = Deprecated::update(new_bit_field3, false);
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -5783,11 +6302,53 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
(descriptor_index == descriptors->number_of_descriptors() - 1)
? SIMPLE_TRANSITION
: FULL_TRANSITION;
+ ASSERT(name == descriptors->GetKey(descriptor_index));
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
set_transitions(transitions);
result->SetBackPointer(this);
+ } else {
+ descriptors->InitializeRepresentations(Representation::Tagged());
+ }
+
+ return result;
+}
+
+
+MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
+ DescriptorArray* descriptors) {
+ ASSERT(descriptors->IsSortedNoDuplicates());
+
+ Map* result;
+ MaybeObject* maybe_result = CopyDropDescriptors();
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ result->InitializeDescriptors(descriptors);
+ result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+ int unused_property_fields = this->unused_property_fields();
+ if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
+ unused_property_fields = this->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ }
+
+ result->set_unused_property_fields(unused_property_fields);
+ result->set_owns_descriptors(false);
+
+ if (CanHaveMoreTransitions()) {
+ Name* name = descriptors->GetKey(new_descriptor);
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions =
+ AddTransition(name, result, SIMPLE_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+ set_transitions(transitions);
+ result->SetBackPointer(this);
+ } else {
+ descriptors->InitializeRepresentations(Representation::Tagged());
}
return result;
@@ -5888,7 +6449,6 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
int old_size = NumberOfOwnDescriptors();
int new_size = old_size + 1;
- descriptor->SetEnumerationIndex(new_size);
if (flag == INSERT_TRANSITION &&
owns_descriptors() &&
@@ -5973,9 +6533,7 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
int new_size = NumberOfOwnDescriptors();
ASSERT(0 <= insertion_index && insertion_index < new_size);
- PropertyDetails details = descriptors->GetDetails(insertion_index);
- ASSERT_LE(details.descriptor_index(), new_size);
- descriptor->SetEnumerationIndex(details.descriptor_index());
+ ASSERT_LT(insertion_index, new_size);
DescriptorArray* new_descriptors;
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
@@ -6817,6 +7375,117 @@ void DescriptorArray::CopyFrom(int dst_index,
}
+// Generalize the |other| descriptor array by merging it into the (at least
+// partly) updated |this| descriptor array.
+// The method merges two descriptor arrays in three parts. Both descriptor
+// arrays
+// are identical up to |verbatim|. They also overlap in keys up to |valid|.
+// Between |verbatim| and |valid|, the resulting descriptor type as well as the
+// representation are generalized from both |this| and |other|. Beyond |valid|,
+// the descriptors are copied verbatim from |other| up to |new_size|.
+// In case of incompatible types, the type and representation of |other| are
+// used.
+MaybeObject* DescriptorArray::Merge(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+
+ DescriptorArray* result;
+ // Allocate a new descriptor array large enough to hold the required
+ // descriptors, and at minimum the same size as this descriptor array.
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
+ new_size, Max(new_size, other->number_of_descriptors()) - new_size);
+ if (!maybe_descriptors->To(&result)) return maybe_descriptors;
+ ASSERT(result->length() > length() ||
+ result->NumberOfSlackDescriptors() > 0 ||
+ result->number_of_descriptors() == other->number_of_descriptors());
+ ASSERT(result->number_of_descriptors() == new_size);
+
+ DescriptorArray::WhitenessWitness witness(result);
+
+ int descriptor;
+
+ // 0 -> |verbatim|
+ int current_offset = 0;
+ for (descriptor = 0; descriptor < verbatim; descriptor++) {
+ if (GetDetails(descriptor).type() == FIELD) current_offset++;
+ result->CopyFrom(descriptor, this, descriptor, witness);
+ }
+
+ // |verbatim| -> |valid|
+ for (; descriptor < valid; descriptor++) {
+ Name* key = GetKey(descriptor);
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+
+ if (details.type() == FIELD || other_details.type() == FIELD ||
+ (details.type() == CONSTANT_FUNCTION &&
+ other_details.type() == CONSTANT_FUNCTION &&
+ GetValue(descriptor) != other->GetValue(descriptor))) {
+ Representation representation =
+ details.representation().generalize(other_details.representation());
+ FieldDescriptor d(key,
+ current_offset++,
+ other_details.attributes(),
+ representation);
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ // |valid| -> |new_size|
+ for (; descriptor < new_size; descriptor++) {
+ PropertyDetails details = other->GetDetails(descriptor);
+ if (details.type() == FIELD) {
+ Name* key = other->GetKey(descriptor);
+ FieldDescriptor d(key,
+ current_offset++,
+ details.attributes(),
+ details.representation());
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ result->Sort();
+ return result;
+}
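
To make the three regions concrete, here is a minimal standalone sketch of the merge's index partitioning; all types are simplified stand-ins for V8's DescriptorArray and Representation, not the real classes:

// Standalone sketch of the three-part merge described above.
#include <algorithm>
#include <vector>

enum Kind { kNone, kSmi, kInt32, kDouble, kTagged };  // ordered lattice

struct Desc { Kind representation; bool is_field; };

// Mirrors Representation::generalize(): the more general kind wins.
static Kind Generalize(Kind a, Kind b) { return std::max(a, b); }

std::vector<Desc> Merge(const std::vector<Desc>& self,
                        const std::vector<Desc>& other,
                        int verbatim, int valid, int new_size) {
  std::vector<Desc> result(new_size);
  int i = 0;
  // [0, verbatim): both arrays agree; copy from |self|.
  for (; i < verbatim; i++) result[i] = self[i];
  // [verbatim, valid): keys overlap; generalize type and representation.
  for (; i < valid; i++) {
    result[i] = other[i];
    if (self[i].is_field || other[i].is_field) {
      result[i].is_field = true;
      result[i].representation =
          Generalize(self[i].representation, other[i].representation);
    }
  }
  // [valid, new_size): copy verbatim from |other|.
  for (; i < new_size; i++) result[i] = other[i];
  return result;
}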
+
+
+// Checks whether a merge of |other| into |this| would return a copy of |this|.
+bool DescriptorArray::IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+ if (valid != new_size) return false;
+
+ for (int descriptor = verbatim; descriptor < valid; descriptor++) {
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+ if (details.type() != other_details.type()) {
+ if (details.type() != FIELD ||
+ other_details.type() != CONSTANT_FUNCTION) {
+ return false;
+ }
+ } else if (details.type() == CONSTANT_FUNCTION) {
+ if (GetValue(descriptor) != other->GetValue(descriptor)) {
+ return false;
+ }
+ } else if (!other_details.representation().fits_into(
+ details.representation())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
@@ -8153,19 +8822,28 @@ int Map::Hash() {
}
+static bool CheckEquivalent(Map* first, Map* second) {
+ return
+ first->constructor() == second->constructor() &&
+ first->prototype() == second->prototype() &&
+ first->instance_type() == second->instance_type() &&
+ first->bit_field() == second->bit_field() &&
+ first->bit_field2() == second->bit_field2() &&
+ first->is_observed() == second->is_observed() &&
+ first->function_with_prototype() == second->function_with_prototype();
+}
+
+
+bool Map::EquivalentToForTransition(Map* other) {
+ return CheckEquivalent(this, other);
+}
+
+
bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
- return
- constructor() == other->constructor() &&
- prototype() == other->prototype() &&
- inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- other->inobject_properties()) &&
- instance_type() == other->instance_type() &&
- bit_field() == other->bit_field() &&
- bit_field2() == other->bit_field2() &&
- is_observed() == other->is_observed() &&
- function_with_prototype() == other->function_with_prototype();
+ int properties = mode == CLEAR_INOBJECT_PROPERTIES
+ ? 0 : other->inobject_properties();
+ return CheckEquivalent(this, other) && inobject_properties() == properties;
}
@@ -8387,9 +9065,14 @@ static MUST_USE_RESULT MaybeObject* CacheInitialJSArrayMaps(
i < kFastElementsKindCount; ++i) {
Map* new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (current_map->HasElementsTransition()) {
+ new_map = current_map->elements_transition_map();
+ ASSERT(new_map->elements_kind() == next_kind);
+ } else {
+ MaybeObject* maybe_new_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ }
maps->set(next_kind, new_map);
current_map = new_map;
}
@@ -9207,6 +9890,19 @@ void Code::FindAllCode(CodeHandleList* code_list, int length) {
}
+Name* Code::FindFirstName() {
+ ASSERT(is_inline_cache_stub());
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsName()) return Name::cast(object);
+ }
+ return NULL;
+}
+
+
void Code::ClearInlineCaches() {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -10665,7 +11361,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
}
FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
@@ -12659,7 +13355,7 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
}
uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0);
Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
@@ -12835,6 +13531,58 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
return result_double;
}
+ExternalArrayType JSTypedArray::type() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return kExternalByteArray;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return kExternalUnsignedByteArray;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return kExternalShortArray;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return kExternalUnsignedShortArray;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return kExternalIntArray;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return kExternalUnsignedIntArray;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return kExternalFloatArray;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return kExternalDoubleArray;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return kExternalPixelArray;
+ default:
+ return static_cast<ExternalArrayType>(-1);
+ }
+}
+
+
+size_t JSTypedArray::element_size() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return 8;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return 1;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
@@ -13017,7 +13765,7 @@ MaybeObject* GlobalObject::EnsurePropertyCell(Name* name) {
heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
- PropertyDetails details(NONE, NORMAL);
+ PropertyDetails details(NONE, NORMAL, 0);
details = details.AsDeleted();
Object* dictionary;
{ MaybeObject* maybe_dictionary =
@@ -13459,8 +14207,8 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
+ PropertyDetails new_details = PropertyDetails(
+ details.attributes(), details.type(), enum_index);
DetailsAtPut(i, new_details);
}
}
@@ -13526,7 +14274,7 @@ MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
{ MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
Dictionary<Shape, Key>::Hash(key));
@@ -13537,8 +14285,6 @@ template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
- ASSERT(details.dictionary_index() == details.descriptor_index());
-
// Validate key is absent.
SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
@@ -13614,7 +14360,7 @@ MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value) {
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL));
+ return Add(key, value, PropertyDetails(NONE, NORMAL, 0));
}
@@ -13911,15 +14657,13 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
}
PropertyDetails details = DetailsAt(i);
- ASSERT(details.descriptor_index() == details.dictionary_index());
- int enumeration_index = details.descriptor_index();
+ int enumeration_index = details.dictionary_index();
PropertyType type = details.type();
if (value->IsJSFunction()) {
ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
- details.attributes(),
- enumeration_index);
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
@@ -13933,13 +14677,13 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
FieldDescriptor d(key,
current_offset++,
details.attributes(),
- enumeration_index);
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
CallbacksDescriptor d(key,
value,
- details.attributes(),
- enumeration_index);
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else {
UNREACHABLE();
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index e32c41bb1..1b4ed5b3b 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1061,6 +1061,29 @@ class Object : public MaybeObject {
inline double Number();
inline bool IsNaN();
+ inline Representation OptimalRepresentation() {
+ if (FLAG_track_fields && IsSmi()) {
+ return Representation::Smi();
+ } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ return Representation::Double();
+ } else {
+ return Representation::Tagged();
+ }
+ }
+
+ inline bool FitsRepresentation(Representation representation) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ return IsSmi();
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ return IsNumber();
+ }
+ return true;
+ }
+
+ inline MaybeObject* AllocateNewStorageFor(Heap* heap,
+ Representation representation,
+ PretenureFlag tenure = NOT_TENURED);
+
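
A standalone sketch of the decision tree the two helpers above implement, assuming both tracking flags are enabled (simplified enum in place of the real Representation class):

// Minimal sketch; ordering encodes generality: Smi < Double < Tagged.
enum class Rep { Smi, Double, Tagged };

Rep OptimalRep(bool is_smi, bool is_heap_number) {
  if (is_smi) return Rep::Smi;             // small integer: store unboxed
  if (is_heap_number) return Rep::Double;  // number: store as raw double
  return Rep::Tagged;                      // anything else: tagged pointer
}

bool Fits(Rep value_rep, Rep field_rep) {
  // A Smi fits any field; a Double fits Double or Tagged fields;
  // a Tagged value only fits a Tagged field.
  return static_cast<int>(value_rep) <= static_cast<int>(field_rep);
}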
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
inline bool HasValidElements();
@@ -1809,10 +1832,11 @@ class JSObject: public JSReceiver {
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
- static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
- static void TransitionToMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* TransitionToMap(Map* map);
+ static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
+ inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map);
+
+ static void MigrateInstance(Handle<JSObject> instance);
+ inline MUST_USE_RESULT MaybeObject* MigrateInstance();
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
@@ -2115,10 +2139,12 @@ class JSObject: public JSReceiver {
// Add a property to a fast-case object using a map transition to
// new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
- Name* name,
- Object* value,
- int field_index);
+ MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(
+ Map* new_map,
+ Name* name,
+ Object* value,
+ int field_index,
+ Representation representation);
// Add a constant function property to a fast-case object.
// This leaves a CONSTANT_TRANSITION in the old map, and
@@ -2167,6 +2193,11 @@ class JSObject: public JSReceiver {
Object* new_value,
PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
+ MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
+ int modify_index,
+ Representation new_representation);
+
// Add a property to a fast-case object.
MUST_USE_RESULT MaybeObject* AddFastProperty(
Name* name,
@@ -2222,8 +2253,11 @@ class JSObject: public JSReceiver {
int unused_property_fields);
// Access fast-case object properties at index.
- inline Object* FastPropertyAt(int index);
- inline Object* FastPropertyAtPut(int index, Object* value);
+ MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
+ Representation representation,
+ int index);
+ inline Object* RawFastPropertyAt(int index);
+ inline void FastPropertyAtPut(int index, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2756,6 +2790,9 @@ class DescriptorArray: public FixedArray {
inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
+ inline void InitializeRepresentations(Representation representation);
+ inline void SetRepresentation(int descriptor_number,
+ Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
@@ -2776,6 +2813,15 @@ class DescriptorArray: public FixedArray {
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ MUST_USE_RESULT MaybeObject* Merge(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
+
+ bool IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
@@ -4592,6 +4638,9 @@ class Code: public HeapObject {
Code* FindFirstCode();
void FindAllCode(CodeHandleList* code_list, int length);
+ // Find the first name in an IC stub.
+ Name* FindFirstName();
+
class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
@@ -4911,6 +4960,9 @@ class DependentCode: public FixedArray {
// Group of code that weakly embed this map and depend on being
// deoptimized when the map is garbage collected.
kWeaklyEmbeddedGroup,
+ // Group of code that embed a transition to this map, and depend on being
+ // deoptimized when the transition is replaced by a new version.
+ kTransitionGroup,
// Group of code that omit run-time prototype checks for prototypes
// described by this map. The group is deoptimized whenever an object
// described by this map changes shape (and transitions to a new map),
@@ -5004,6 +5056,7 @@ class Map: public HeapObject {
class DictionaryMap: public BitField<bool, 24, 1> {};
class OwnsDescriptors: public BitField<bool, 25, 1> {};
class IsObserved: public BitField<bool, 26, 1> {};
+ class Deprecated: public BitField<bool, 27, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5146,6 +5199,28 @@ class Map: public HeapObject {
inline void ClearTransitions(Heap* heap,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void DeprecateTransitionTree();
+ void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
+
+ Map* FindRootMap();
+ Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
+ Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+
+ int NumberOfFields();
+
+ bool InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
+ int target_inobject,
+ int target_unused);
+ static Handle<Map> GeneralizeRepresentation(
+ Handle<Map> map,
+ int modify_index,
+ Representation new_representation);
+ MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
+ int modify_index,
+ Representation representation);
+ MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations();
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -5284,6 +5359,15 @@ class Map: public HeapObject {
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
inline void set_is_observed(bool is_observed);
+ inline void deprecate();
+ inline bool is_deprecated();
+ inline bool CanBeDeprecated();
+ // Returns a non-deprecated version of the input. If the input was not
+ // deprecated, it is directly returned. Otherwise, the non-deprecated version
+ // is found by re-transitioning from the root of the transition tree using the
+ // descriptor array of the map. New maps (and transitions) may be created if
+ // no new (more general) version exists.
+ static inline Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
@@ -5293,6 +5377,9 @@ class Map: public HeapObject {
Name* name,
TransitionFlag flag,
int descriptor_index);
+ MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
+ int new_descriptor,
+ DescriptorArray* descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -5318,9 +5405,6 @@ class Map: public HeapObject {
// instance descriptors.
MUST_USE_RESULT MaybeObject* Copy();
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(Name* name);
-
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
@@ -5371,6 +5455,8 @@ class Map: public HeapObject {
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
+ bool EquivalentToForTransition(Map* other);
+
// Compares this map to another to see if they describe equivalent objects.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
@@ -6334,9 +6420,14 @@ class JSGeneratorObject: public JSObject {
inline int continuation();
inline void set_continuation(int continuation);
- // [operands]: Saved operand stack.
+ // [operand_stack]: Saved operand stack.
DECL_ACCESSORS(operand_stack, FixedArray)
+ // [stack_handler_index]: Index of first stack handler in operand_stack, or -1
+ // if the captured activation had no stack handler.
+ inline int stack_handler_index();
+ inline void set_stack_handler_index(int stack_handler_index);
+
// Casting.
static inline JSGeneratorObject* cast(Object* obj);
@@ -6354,11 +6445,24 @@ class JSGeneratorObject: public JSObject {
static const int kReceiverOffset = kContextOffset + kPointerSize;
static const int kContinuationOffset = kReceiverOffset + kPointerSize;
static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
- static const int kSize = kOperandStackOffset + kPointerSize;
+ static const int kStackHandlerIndexOffset =
+ kOperandStackOffset + kPointerSize;
+ static const int kSize = kStackHandlerIndexOffset + kPointerSize;
// Resume mode, for use by runtime functions.
enum ResumeMode { SEND, THROW };
+ // Yielding from a generator returns an object with the following inobject
+ // properties. See Context::generator_result_map() for the map.
+ static const int kResultValuePropertyIndex = 0;
+ static const int kResultDonePropertyIndex = 1;
+ static const int kResultPropertyCount = 2;
+
+ static const int kResultValuePropertyOffset = JSObject::kHeaderSize;
+ static const int kResultDonePropertyOffset =
+ kResultValuePropertyOffset + kPointerSize;
+ static const int kResultSize = kResultDonePropertyOffset + kPointerSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
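
For orientation, the result-object constants above follow the usual header-then-fields layout. A C++11 sketch of the same arithmetic, assuming a 64-bit build and the three-pointer JSObject header (map, properties, elements):

// Sketch: recomputing the {value, done} result-object layout.
constexpr int kPointerSize = 8;                        // assumed 64-bit build
constexpr int kJSObjectHeaderSize = 3 * kPointerSize;  // map, properties, elements
constexpr int kResultValuePropertyOffset = kJSObjectHeaderSize;
constexpr int kResultDonePropertyOffset =
    kResultValuePropertyOffset + kPointerSize;
constexpr int kResultSize = kResultDonePropertyOffset + kPointerSize;
static_assert(kResultSize == kJSObjectHeaderSize + 2 * kPointerSize,
              "result object carries exactly {value, done} inobject");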
@@ -7638,8 +7742,6 @@ class String: public Name {
// possible.
inline bool HasOnlyOneByteChars();
- inline bool IsOneByteConvertible();
-
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
@@ -8647,6 +8749,9 @@ class JSTypedArray: public JSObject {
// Casting.
static inline JSTypedArray* cast(Object* obj);
+ ExternalArrayType type();
+ size_t element_size();
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 267b8722c..cff51bc9c 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -662,7 +662,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
!body->at(0)->IsExpressionStatement() ||
!body->at(0)->AsExpressionStatement()->
expression()->IsFunctionLiteral()) {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
+ ReportMessage("single_function_literal", Vector<const char*>::empty());
ok = false;
}
}
@@ -3738,33 +3738,6 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
}
-void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
- Handle<FixedArray> literals,
- bool* is_simple,
- int* depth) {
- // Fill in the literals.
- // Accumulate output values in local variables.
- bool is_simple_acc = true;
- int depth_acc = 1;
- for (int i = 0; i < values->length(); i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple_acc = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3991,7 +3964,8 @@ void Parser::BuildObjectLiteralConstantProperties(
Handle<FixedArray> constant_properties,
bool* is_simple,
bool* fast_elements,
- int* depth) {
+ int* depth,
+ bool* may_store_doubles) {
int position = 0;
// Accumulate the value in local variables and store it at the end.
bool is_simple_acc = true;
@@ -4014,6 +3988,13 @@ void Parser::BuildObjectLiteralConstantProperties(
// runtime. The enumeration order is maintained.
Handle<Object> key = property->key()->handle();
Handle<Object> value = GetBoilerplateValue(property->value());
+
+ // Ensure objects with doubles are always treated as nested objects.
+ // TODO(verwaest): Remove once we can store them inline.
+ if (FLAG_track_double_fields && value->IsNumber()) {
+ *may_store_doubles = true;
+ }
+
is_simple_acc = is_simple_acc && !value->IsUndefined();
// Keep track of the number of elements in the object literal and
@@ -4215,17 +4196,20 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
bool is_simple = true;
bool fast_elements = true;
int depth = 1;
+ bool may_store_doubles = false;
BuildObjectLiteralConstantProperties(properties,
constant_properties,
&is_simple,
&fast_elements,
- &depth);
+ &depth,
+ &may_store_doubles);
return factory()->NewObjectLiteral(constant_properties,
properties,
literal_index,
is_simple,
fast_elements,
depth,
+ may_store_doubles,
has_function);
}
@@ -4752,7 +4736,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
return args->at(0);
} else {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
+ ReportMessage("not_isvar", Vector<const char*>::empty());
*ok = false;
return NULL;
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index acf47bbcd..1defbf274 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -692,13 +692,8 @@ class Parser BASE_EMBEDDED {
Handle<FixedArray> constants,
bool* is_simple,
bool* fast_elements,
- int* depth);
-
- // Populate the literals fixed array for a materialized array literal.
- void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- int* depth);
+ int* depth,
+ bool* may_store_doubles);
// Decide if a property should be in the object boilerplate.
bool IsBoilerplateProperty(ObjectLiteral::Property* property);
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 054d5b5a5..f76ec4433 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -115,26 +115,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
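
The mask-then-add above yields a page-aligned hint in [0x20000000, 0x5ffff000]. A quick standalone check of that arithmetic:

// Sketch verifying the hint range produced by the code above.
#include <cassert>
#include <cstdint>

uint32_t MmapHint(uint32_t raw) {
  raw &= 0x3ffff000u;  // keep 30 bits, zero the low 12: page-aligned, < 1 GB
  raw += 0x20000000u;  // shift the window up to start at 512 MB
  return raw;
}

int main() {
  assert(MmapHint(0x00000000u) == 0x20000000u);  // lower bound
  assert(MmapHint(0xffffffffu) == 0x5ffff000u);  // upper bound, page-aligned
  return 0;
}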
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 4660c0fde..3a1eca7c6 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -628,11 +628,7 @@ void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
class IndentedScope BASE_EMBEDDED {
public:
- explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
- ast_printer_->inc_indent();
- }
-
- IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
+ IndentedScope(AstPrinter* printer, const char* txt)
: ast_printer_(printer) {
ast_printer_->PrintIndented(txt);
ast_printer_->Print("\n");
@@ -693,21 +689,16 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
}
-void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
- if (labels != NULL && labels->length() > 0) {
- PrintIndented(info == NULL ? "LABELS" : info);
- Print(" ");
- PrintLabels(labels);
- Print("\n");
- } else if (info != NULL) {
- PrintIndented(info);
- Print("\n");
- }
+void AstPrinter::PrintLabelsIndented(ZoneStringList* labels) {
+ if (labels == NULL || labels->length() == 0) return;
+ PrintIndented("LABELS ");
+ PrintLabels(labels);
+ Print("\n");
}
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node);
+ IndentedScope indent(this, s);
Visit(node);
}
@@ -779,6 +770,7 @@ void AstPrinter::VisitBlock(Block* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->var(),
@@ -786,6 +778,7 @@ void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
PrintIndented("FUNCTION ");
PrintLiteral(node->proxy()->name(), true);
@@ -816,19 +809,21 @@ void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+ IndentedScope indent(this, "MODULE LITERAL");
VisitBlock(node->body());
}
void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
+ IndentedScope indent(this, "MODULE VARIABLE");
Visit(node->proxy());
}
void AstPrinter::VisitModulePath(ModulePath* node) {
- IndentedScope indent(this, "PATH");
- PrintIndentedVisit("MODULE", node->module());
- PrintLiteralIndented("NAME", node->name(), false);
+ IndentedScope indent(this, "MODULE PATH");
+ PrintIndentedVisit("MODULE PATH PARENT", node->module());
+ PrintLiteralIndented("NAME", node->name(), true);
}
@@ -838,24 +833,26 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
- IndentedScope indent(this, "MODULE");
+ IndentedScope indent(this, "MODULE STATEMENT");
PrintLiteralIndented("NAME", node->proxy()->name(), true);
PrintStatements(node->body()->statements());
}
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+ IndentedScope indent(this, "EXPRESSION STATEMENT");
Visit(node->expression());
}
void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- PrintIndented("EMPTY\n");
+ IndentedScope indent(this, "EMPTY");
}
void AstPrinter::VisitIfStatement(IfStatement* node) {
- PrintIndentedVisit("IF", node->condition());
+ IndentedScope indent(this, "IF");
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_statement());
if (node->HasElseStatement()) {
PrintIndentedVisit("ELSE", node->else_statement());
@@ -864,17 +861,20 @@ void AstPrinter::VisitIfStatement(IfStatement* node) {
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- PrintLabelsIndented("CONTINUE", node->target()->labels());
+ IndentedScope indent(this, "CONTINUE");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- PrintLabelsIndented("BREAK", node->target()->labels());
+ IndentedScope indent(this, "BREAK");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- PrintIndentedVisit("RETURN", node->expression());
+ IndentedScope indent(this, "RETURN");
+ Visit(node->expression());
}
@@ -887,7 +887,7 @@ void AstPrinter::VisitWithStatement(WithStatement* node) {
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
PrintCaseClause(node->cases()->at(i));
@@ -897,7 +897,7 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
}
@@ -905,7 +905,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
}
@@ -913,7 +913,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -972,12 +972,13 @@ void AstPrinter::VisitSharedFunctionInfoLiteral(
void AstPrinter::VisitConditional(Conditional* node) {
IndentedScope indent(this, "CONDITIONAL");
- PrintIndentedVisit("?", node->condition());
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitLiteral(Literal* node) {
PrintLiteralIndented("LITERAL", node->handle(), true);
}
@@ -1034,6 +1035,7 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
@@ -1059,24 +1061,26 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->target());
Visit(node->value());
}
void AstPrinter::VisitYield(Yield* node) {
- PrintIndentedVisit("YIELD", node->expression());
+ IndentedScope indent(this, "YIELD");
+ Visit(node->expression());
}
void AstPrinter::VisitThrow(Throw* node) {
- PrintIndentedVisit("THROW", node->exception());
+ IndentedScope indent(this, "THROW");
+ Visit(node->exception());
}
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY", node);
+ IndentedScope indent(this, "PROPERTY");
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->handle()->IsInternalizedString()) {
@@ -1102,14 +1106,15 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent(this);
+ IndentedScope indent(this, "CALL RUNTIME");
+ PrintLiteralIndented("NAME", node->name(), false);
PrintArguments(node->arguments());
}
void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- PrintIndentedVisit(Token::Name(node->op()), node->expression());
+ IndentedScope indent(this, Token::Name(node->op()));
+ Visit(node->expression());
}
@@ -1117,19 +1122,20 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- PrintIndentedVisit(buf.start(), node->expression());
+ IndentedScope indent(this, buf.start());
+ Visit(node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 41175ab2a..6657ecd14 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -106,7 +106,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value);
- void PrintLabelsIndented(const char* info, ZoneStringList* labels);
+ void PrintLabelsIndented(ZoneStringList* labels);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 2aa6dcfa9..62140fe96 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -76,30 +76,121 @@ enum PropertyType {
};
+class Representation {
+ public:
+ enum Kind {
+ kNone,
+ kSmi,
+ kInteger32,
+ kDouble,
+ kTagged,
+ kExternal,
+ kNumRepresentations
+ };
+
+ Representation() : kind_(kNone) { }
+
+ static Representation None() { return Representation(kNone); }
+ static Representation Tagged() { return Representation(kTagged); }
+ static Representation Smi() { return Representation(kSmi); }
+ static Representation Integer32() { return Representation(kInteger32); }
+ static Representation Double() { return Representation(kDouble); }
+ static Representation External() { return Representation(kExternal); }
+
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
+ bool Equals(const Representation& other) const {
+ return kind_ == other.kind_;
+ }
+
+ bool IsCompatibleForLoad(const Representation& other) const {
+ return (IsDouble() && other.IsDouble()) ||
+ (!IsDouble() && !other.IsDouble());
+ }
+
+ bool is_more_general_than(const Representation& other) const {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ return kind_ > other.kind_;
+ }
+
+ bool fits_into(const Representation& other) const {
+ return other.is_more_general_than(*this) || other.Equals(*this);
+ }
+
+ Representation generalize(Representation other) {
+ if (is_more_general_than(other)) {
+ return *this;
+ } else {
+ return other;
+ }
+ }
+
+ Kind kind() const { return static_cast<Kind>(kind_); }
+ bool IsNone() const { return kind_ == kNone; }
+ bool IsTagged() const { return kind_ == kTagged; }
+ bool IsSmi() const { return kind_ == kSmi; }
+ bool IsInteger32() const { return kind_ == kInteger32; }
+ bool IsDouble() const { return kind_ == kDouble; }
+ bool IsExternal() const { return kind_ == kExternal; }
+ bool IsSpecialization() const {
+ return kind_ == kInteger32 || kind_ == kDouble;
+ }
+ const char* Mnemonic() const;
+
+ private:
+ explicit Representation(Kind k) : kind_(k) { }
+
+ // Make sure kind fits in int8.
+ STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+ int8_t kind_;
+};
+
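Because the Kind enumerators are declared in increasing order of generality, is_more_general_than reduces to an integer comparison. A usage sketch relying only on the class above (ASSERT as used elsewhere in this header):

// Usage sketch of the representation lattice.
void RepresentationLatticeExamples() {
  Representation smi = Representation::Smi();
  Representation dbl = Representation::Double();
  Representation tagged = Representation::Tagged();

  ASSERT(dbl.is_more_general_than(smi));     // kDouble > kSmi
  ASSERT(smi.fits_into(dbl));                // a Smi field can widen to Double
  ASSERT(!tagged.fits_into(dbl));            // but Tagged cannot narrow
  ASSERT(smi.generalize(dbl).Equals(dbl));   // generalize picks the wider kind
  ASSERT(tagged.generalize(smi).Equals(tagged));
}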
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
- int index = 0) {
+ int index) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
| DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->dictionary_index());
+ }
+
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ Representation representation) {
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | RepresentationField::encode(EncodeRepresentation(representation));
}
int pointer() { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+ PropertyDetails CopyWithRepresentation(Representation representation) {
+ return PropertyDetails(value_, representation);
+ }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
+ static uint8_t EncodeRepresentation(Representation representation) {
+ return representation.kind();
+ }
+
+ static Representation DecodeRepresentation(uint32_t bits) {
+ return Representation::FromKind(static_cast<Representation::Kind>(bits));
+ }
+
PropertyType type() { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
@@ -110,8 +201,8 @@ class PropertyDetails BASE_EMBEDDED {
return DictionaryStorageField::decode(value_);
}
- int descriptor_index() {
- return DescriptorStorageField::decode(value_);
+ Representation representation() {
+ return DecodeRepresentation(RepresentationField::decode(value_));
}
inline PropertyDetails AsDeleted();
@@ -131,14 +222,18 @@ class PropertyDetails BASE_EMBEDDED {
class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
class DeletedField: public BitField<uint32_t, 6, 1> {};
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
- class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
- class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
+ class DescriptorPointer: public BitField<uint32_t, 7, 11> {};
+ class RepresentationField: public BitField<uint32_t, 18, 3> {};
static const int kInitialIndex = 1;
private:
PropertyDetails(int value, int pointer) {
- value_ = DescriptorPointer::update(value, pointer);
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+ PropertyDetails(int value, Representation representation) {
+ value_ = RepresentationField::update(
+ value, EncodeRepresentation(representation));
}
uint32_t value_;
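
The BitField declarations above pack everything into one 32-bit word. A standalone sketch of the same packing with plain shifts and masks (enum values illustrative; positions copied from the declarations):

// Standalone sketch of the 32-bit PropertyDetails packing.
#include <cassert>
#include <cstdint>

// Positions/widths mirroring the BitField declarations above.
constexpr uint32_t kTypeShift = 0, kTypeBits = 3;
constexpr uint32_t kAttrShift = 3, kAttrBits = 3;
constexpr uint32_t kRepShift = 18, kRepBits = 3;

uint32_t Encode(uint32_t type, uint32_t attrs, uint32_t rep) {
  return (type << kTypeShift) | (attrs << kAttrShift) | (rep << kRepShift);
}

uint32_t DecodeRep(uint32_t value) {
  return (value >> kRepShift) & ((1u << kRepBits) - 1);
}

int main() {
  uint32_t v = Encode(/*FIELD=*/1, /*NONE=*/0, /*kDouble=*/3);
  assert(DecodeRep(v) == 3);
  return 0;
}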
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index cbf2fc859..80a06cb7f 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -112,7 +112,6 @@ void Descriptor::Print(FILE* out) {
GetKey()->ShortPrint(out);
PrintF(out, " @ ");
GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().descriptor_index());
}
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index bbba8aea6..606f11152 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -64,10 +64,6 @@ class Descriptor BASE_EMBEDDED {
void Print(FILE* out);
#endif
- void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
- }
-
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
private:
@@ -93,10 +89,10 @@ class Descriptor BASE_EMBEDDED {
Object* value,
PropertyAttributes attributes,
PropertyType type,
- int index)
+ Representation representation)
: key_(key),
value_(value),
- details_(attributes, type, index) { }
+ details_(attributes, type, representation) { }
friend class DescriptorArray;
};
@@ -107,8 +103,9 @@ class FieldDescriptor: public Descriptor {
FieldDescriptor(Name* key,
int field_index,
PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+ Representation representation)
+ : Descriptor(key, Smi::FromInt(field_index), attributes,
+ FIELD, representation) {}
};
@@ -116,9 +113,9 @@ class ConstantFunctionDescriptor: public Descriptor {
public:
ConstantFunctionDescriptor(Name* key,
JSFunction* function,
- PropertyAttributes attributes,
- int index)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+ PropertyAttributes attributes)
+ : Descriptor(key, function, attributes, CONSTANT_FUNCTION,
+ Representation::Tagged()) {}
};
@@ -126,9 +123,9 @@ class CallbacksDescriptor: public Descriptor {
public:
CallbacksDescriptor(Name* key,
Object* foreign,
- PropertyAttributes attributes,
- int index = 0)
- : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
+ PropertyAttributes attributes)
+ : Descriptor(key, foreign, attributes, CALLBACKS,
+ Representation::Tagged()) {}
};
@@ -190,7 +187,7 @@ class LookupResult BASE_EMBEDDED {
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NONEXISTENT) {
+ details_(NONE, NONEXISTENT, Representation::None()) {
isolate->SetTopLookupResult(this);
}
@@ -208,9 +205,13 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
+ bool CanHoldValue(Handle<Object> value) {
+ return value->FitsRepresentation(details_.representation());
+ }
+
void TransitionResult(JSObject* holder, int number) {
lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION);
+ details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
number_ = number;
}
@@ -225,19 +226,19 @@ class LookupResult BASE_EMBEDDED {
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER);
+ details_ = PropertyDetails(NONE, HANDLER, Representation::None());
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
+ details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::None());
}
void NotFound() {
lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT);
+ details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
}
@@ -256,6 +257,13 @@ class LookupResult BASE_EMBEDDED {
return details_.type();
}
+ Representation representation() {
+ ASSERT(IsFound());
+ ASSERT(!IsTransition());
+ ASSERT(details_.type() != NONEXISTENT);
+ return details_.representation();
+ }
+
PropertyAttributes GetAttributes() {
ASSERT(!IsTransition());
ASSERT(IsFound());
@@ -340,7 +348,7 @@ class LookupResult BASE_EMBEDDED {
Object* GetLazyValue() {
switch (type()) {
case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex().field_index());
+ return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index ebe88fe9b..4e00b2941 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -28,6 +28,8 @@
#include <stdlib.h>
#include <limits>
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "accessors.h"
@@ -232,7 +234,9 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
constant_properties,
&is_result_from_cache);
- Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<JSObject> boilerplate =
+ isolate->factory()->NewJSObjectFromMap(
+ map, isolate->heap()->GetPretenureMode());
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
@@ -249,6 +253,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
+ // TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0), isolate);
Handle<Object> value(constant_properties->get(index+1), isolate);
@@ -335,8 +340,10 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::NativeContextFromLiterals(*literals)->array_function());
- Handle<JSArray> object =
- Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
+
+ Handle<JSArray> object = Handle<JSArray>::cast(
+ isolate->factory()->NewJSObject(
+ constructor, isolate->heap()->GetPretenureMode()));
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
@@ -646,11 +653,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
- Persistent<Value> object,
+ Persistent<Value>* object,
void* data) {
Isolate* isolate = reinterpret_cast<Isolate*>(external_isolate);
HandleScope scope(isolate);
- Handle<Object> internal_object = Utils::OpenHandle(*object);
+ Handle<Object> internal_object = Utils::OpenHandle(**object);
size_t allocated_length = NumberToSize(
isolate, JSArrayBuffer::cast(*internal_object)->byte_length());
@@ -658,7 +665,7 @@ static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
-static_cast<intptr_t>(allocated_length));
if (data != NULL)
free(data);
- object.Dispose(external_isolate);
+ object->Dispose(external_isolate);
}
@@ -773,7 +780,8 @@ enum TypedArrayId {
ARRAY_ID_UINT32 = 5,
ARRAY_ID_INT32 = 6,
ARRAY_ID_FLOAT32 = 7,
- ARRAY_ID_FLOAT64 = 8
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8C = 9
};
@@ -830,6 +838,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
arrayType = kExternalDoubleArray;
elementSize = 8;
break;
+ case ARRAY_ID_UINT8C:
+ elementsKind = EXTERNAL_PIXEL_ELEMENTS;
+ arrayType = kExternalPixelArray;
+ elementSize = 1;
+ break;
default:
UNREACHABLE();
return NULL;
@@ -860,11 +873,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
#define TYPED_ARRAY_GETTER(getter, accessor) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
- HandleScope scope(isolate); \
- ASSERT(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); \
- return holder->accessor(); \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); \
+ if (!holder->IsJSTypedArray()) \
+ return isolate->Throw(*isolate->factory()->NewTypeError( \
+ "not_typed_array", HandleVector<Object>(NULL, 0))); \
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); \
+ return typed_array->accessor(); \
}
TYPED_ARRAY_GETTER(Buffer, buffer)
@@ -874,6 +891,128 @@ TYPED_ARRAY_GETTER(Length, length)
#undef TYPED_ARRAY_GETTER
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source_obj, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset_obj, 2);
+
+ if (!target_obj->IsJSTypedArray())
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_typed_array", HandleVector<Object>(NULL, 0)));
+
+ if (!source_obj->IsJSTypedArray())
+ return isolate->heap()->false_value();
+
+ Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
+ Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
+ size_t offset = NumberToSize(isolate, *offset_obj);
+ size_t target_length = NumberToSize(isolate, target->length());
+ size_t source_length = NumberToSize(isolate, source->length());
+ size_t target_byte_length = NumberToSize(isolate, target->byte_length());
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ if (offset > target_length ||
+ offset + source_length > target_length ||
+ offset + source_length < offset) // overflow
+ return isolate->Throw(*isolate->factory()->NewRangeError(
+ "typed_array_set_source_too_large", HandleVector<Object>(NULL, 0)));
+
+ Handle<JSArrayBuffer> target_buffer(JSArrayBuffer::cast(target->buffer()));
+ Handle<JSArrayBuffer> source_buffer(JSArrayBuffer::cast(source->buffer()));
+ size_t target_offset = NumberToSize(isolate, target->byte_offset());
+ size_t source_offset = NumberToSize(isolate, source->byte_offset());
+ uint8_t* target_base =
+ static_cast<uint8_t*>(target_buffer->backing_store()) + target_offset;
+ uint8_t* source_base =
+ static_cast<uint8_t*>(source_buffer->backing_store()) + source_offset;
+
+ // Typed arrays of the same type: use memmove.
+ if (target->type() == source->type()) {
+ memmove(target_base + offset * target->element_size(),
+ source_base, source_byte_length);
+ return isolate->heap()->true_value();
+ }
+
+ // Typed arrays of different types over the same backing store
+ if ((source_base <= target_base &&
+ source_base + source_byte_length > target_base) ||
+ (target_base <= source_base &&
+ target_base + target_byte_length > source_base)) {
+ size_t target_element_size = target->element_size();
+ size_t source_element_size = source->element_size();
+
+ size_t source_length = NumberToSize(isolate, source->length());
+
+ // Copy left part
+ size_t left_index;
+ {
+ // First un-mutated byte after the next write
+ uint8_t* target_ptr = target_base + (offset + 1) * target_element_size;
+ // Next read at source_ptr. We do not care for memory changing before
+ // source_ptr - we have already copied it.
+ uint8_t* source_ptr = source_base;
+ for (left_index = 0;
+ left_index < source_length && target_ptr <= source_ptr;
+ left_index++) {
+ Handle<Object> v = Object::GetElement(
+ source, static_cast<uint32_t>(left_index));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + left_index), v,
+ NONE, kNonStrictMode);
+ target_ptr += target_element_size;
+ source_ptr += source_element_size;
+ }
+ }
+ // Copy right part
+ size_t right_index;
+ {
+ // First unmutated byte before the next write
+ uint8_t* target_ptr =
+ target_base + (offset + source_length - 1) * target_element_size;
+ // Next read before source_ptr. We do not care for memory changing after
+ // source_ptr - we have already copied it.
+ uint8_t* source_ptr =
+ source_base + source_length * source_element_size;
+ for (right_index = source_length - 1;
+ right_index >= left_index && target_ptr >= source_ptr;
+ right_index--) {
+ Handle<Object> v = Object::GetElement(
+ source, static_cast<uint32_t>(right_index));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + right_index), v,
+ NONE, kNonStrictMode);
+ target_ptr -= target_element_size;
+ source_ptr -= source_element_size;
+ }
+ }
+ // There can be at most 8 entries left in the middle that need buffering
+ // (because the largest element_size is 8 times the smallest).
+ ASSERT((right_index + 1) - left_index <= 8);
+ Handle<Object> temp[8];
+ size_t idx;
+ for (idx = left_index; idx <= right_index; idx++) {
+ temp[idx - left_index] = Object::GetElement(
+ source, static_cast<uint32_t>(idx));
+ }
+ for (idx = left_index; idx <= right_index; idx++) {
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + idx), temp[idx-left_index],
+ NONE, kNonStrictMode);
+ }
+ } else { // Non-overlapping typed arrays
+ for (size_t idx = 0; idx < source_length; idx++) {
+ Handle<Object> value = Object::GetElement(
+ source, static_cast<uint32_t>(idx));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + idx), value,
+ NONE, kNonStrictMode);
+ }
+ }
+
+ return isolate->heap()->true_value();
+}
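
The left/right/middle split exists because element conversion between views of different element sizes over one backing store can clobber bytes that have not been read yet. A compact standalone model of the safe form of that copy; unlike V8's bounded 8-element middle buffer, this sketch buffers every element for clarity:

// Simplified model: copying a uint8_t view into a uint16_t view that
// share one backing store. Buffering decouples reads from writes, so
// widening each element cannot clobber unread source bytes.
#include <cstdint>
#include <cstring>
#include <vector>

void SetOverlapping(uint16_t* target, const uint8_t* source, size_t count) {
  std::vector<uint16_t> temp(count);
  for (size_t i = 0; i < count; i++) temp[i] = source[i];  // widen each element
  std::memcpy(target, temp.data(), count * sizeof(uint16_t));
}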
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -2180,6 +2319,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
+ NoHandleAllocation ha(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ return isolate->heap()->ToBoolean(f->shared()->is_generator());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
@@ -2291,8 +2438,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
CallbacksDescriptor new_desc(name,
instance_desc->GetValue(index),
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.descriptor_index());
+ static_cast<PropertyAttributes>(details.attributes() | READ_ONLY));
// Create a new map featuring the new field descriptors array.
Map* new_map;
@@ -2431,57 +2577,53 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
generator->set_receiver(frame->receiver());
generator->set_continuation(0);
generator->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator->set_stack_handler_index(-1);
return generator;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
- HandleScope scope(isolate);
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame *frame = stack_iterator.frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ JavaScriptFrame* frame = stack_iterator.frame();
+ JSFunction* function = JSFunction::cast(frame->function());
RUNTIME_ASSERT(function->shared()->is_generator());
+ ASSERT_EQ(function, generator_object->function());
- intptr_t offset = frame->pc() - function->code()->instruction_start();
- ASSERT(*function == generator_object->function());
- ASSERT(offset > 0 && Smi::IsValid(offset));
- generator_object->set_continuation(static_cast<int>(offset));
+ // We expect there to be at least two values on the operand stack: the return
+ // value of the yield expression, and the argument to this runtime call.
+ // Neither of those should be saved.
+ int operands_count = frame->ComputeOperandsCount();
+ ASSERT(operands_count >= 2);
+ operands_count -= 2;
- // Generator functions force context allocation for locals, so Local0 points
- // to the bottom of the operand stack. Assume the stack grows down.
- //
- // TODO(wingo): Move these magical calculations to frames.h when the
- // generators implementation has stabilized.
- intptr_t stack_size_in_bytes =
- (frame->fp() + JavaScriptFrameConstants::kLocal0Offset) -
- (frame->sp() - kPointerSize);
- ASSERT(IsAddressAligned(frame->fp(), kPointerSize));
- ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
- ASSERT(stack_size_in_bytes >= 0);
- ASSERT(Smi::IsValid(stack_size_in_bytes));
- intptr_t stack_size = stack_size_in_bytes >> kPointerSizeLog2;
-
- // We expect there to be at least two values on the stack: the return value of
- // the yield expression, and the argument to this runtime call. Neither of
- // those should be saved.
- ASSERT(stack_size >= 2);
- stack_size -= 2;
-
- if (stack_size == 0) {
+ if (operands_count == 0) {
ASSERT_EQ(generator_object->operand_stack(),
isolate->heap()->empty_fixed_array());
+ ASSERT_EQ(generator_object->stack_handler_index(), -1);
// If there are no operands on the stack, there shouldn't be a handler
// active either.
ASSERT(!frame->HasHandler());
} else {
- // TODO(wingo): Save the operand stack and/or the stack handlers.
- UNIMPLEMENTED();
+ int stack_handler_index = -1;
+ MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count);
+ FixedArray* operand_stack;
+ if (!alloc->To(&operand_stack)) return alloc;
+ frame->SaveOperandStack(operand_stack, &stack_handler_index);
+ generator_object->set_operand_stack(operand_stack);
+ generator_object->set_stack_handler_index(stack_handler_index);
}
+ // Set continuation down here to avoid side effects if the operand stack
+ // allocation fails.
+ intptr_t offset = frame->pc() - function->code()->instruction_start();
+ ASSERT(offset > 0 && Smi::IsValid(offset));
+ generator_object->set_continuation(static_cast<int>(offset));
+
// It's possible for the context to be other than the initial context even if
// there is no stack handler active. For example, this is the case in the
// body of a "with" statement. Therefore we always save the context.
@@ -2501,13 +2643,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
// EmitGeneratorResume is called in any case, as it needs to reconstruct
// the stack frame and make space for arguments and operands.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
- HandleScope scope(isolate);
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_CHECKED(Object, value, 1);
CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame *frame = stack_iterator.frame();
+ JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
@@ -2520,18 +2662,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
frame->set_pc(pc + offset);
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
- if (generator_object->operand_stack()->length() != 0) {
- // TODO(wingo): Copy operand stack. Rewind handlers.
- UNIMPLEMENTED();
+ FixedArray* operand_stack = generator_object->operand_stack();
+ int operands_count = operand_stack->length();
+ if (operands_count != 0) {
+ frame->RestoreOperandStack(operand_stack,
+ generator_object->stack_handler_index());
+ generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator_object->set_stack_handler_index(-1);
}
JSGeneratorObject::ResumeMode resume_mode =
static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
switch (resume_mode) {
case JSGeneratorObject::SEND:
- return *value;
+ return value;
case JSGeneratorObject::THROW:
- return isolate->Throw(*value);
+ return isolate->Throw(value);
}
UNREACHABLE();
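Editor's note: the two hunks above replace the old UNIMPLEMENTED() paths. Suspension now snapshots live operands (and any active stack handler) into a FixedArray on the generator object, and resumption restores them, with THROW rewinding into the saved handler. A hedged sketch, assuming --harmony-generators and the next/send/throw method names of the iterator draft this V8 tracked at the time:

    function* g() {
      try {
        var x = 1 + (yield 'ready');  // the pending 1 is saved at suspension
        yield x;
      } catch (e) {
        yield 'caught';
      }
    }
    var it = g();
    it.next();         // suspends with one operand and one try handler saved
    it.send(41);       // SEND mode: frame restored, x becomes 42, yields 42
    it.throw('boom');  // THROW mode: rewinds into the saved try handler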
@@ -2544,7 +2690,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
int continuation = generator->continuation();
- const char *message = continuation == JSGeneratorObject::kGeneratorClosed ?
+ const char* message = continuation == JSGeneratorObject::kGeneratorClosed ?
"generator_finished" : "generator_running";
Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0);
Handle<Object> error = isolate->factory()->NewError(message, argv);
@@ -3339,8 +3485,8 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
// Shortcut for simple non-regexp global replacements
if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
- if (subject->IsOneByteConvertible() &&
- replacement->IsOneByteConvertible()) {
+ if (subject->HasOnlyOneByteChars() &&
+ replacement->HasOnlyOneByteChars()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
@@ -3522,7 +3668,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
if (!subject->IsFlat()) subject = FlattenGetString(subject);
if (replacement->length() == 0) {
- if (subject->IsOneByteConvertible()) {
+ if (subject->HasOnlyOneByteChars()) {
return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, subject, regexp, last_match_info);
} else {
@@ -4327,7 +4473,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
int offset = keyed_lookup_cache->Lookup(receiver_map, key);
if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
+ // Doubles are not cached, so do a raw read of the value.
+ Object* value = receiver->RawFastPropertyAt(offset);
return value->IsTheHole()
? isolate->heap()->undefined_value()
: value;
@@ -4338,8 +4485,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
receiver->LocalLookup(key, &result);
if (result.IsField()) {
int offset = result.GetFieldIndex().field_index();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
+ // Do not track double fields in the keyed lookup cache. Reading
+ // double values requires boxing.
+ if (!FLAG_track_double_fields ||
+ !result.representation().IsDouble()) {
+ keyed_lookup_cache->Update(receiver_map, key, offset);
+ }
+ return receiver->FastPropertyAt(result.representation(), offset);
}
} else {
// Attempt dictionary lookup.
@@ -4515,6 +4667,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
return lookup.holder()->GetNormalizedProperty(&lookup);
case FIELD:
return lookup.holder()->FastPropertyAt(
+ lookup.representation(),
lookup.GetFieldIndex().field_index());
case CONSTANT_FUNCTION:
return lookup.GetConstantFunction();
@@ -6113,7 +6266,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return isolate->heap()->NumberToString(number, false);
+ return isolate->heap()->NumberToString(
+ number, false, isolate->heap()->GetPretenureMode());
}
@@ -6381,7 +6535,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (first->IsString()) return first;
}
- bool one_byte = special->IsOneByteConvertible();
+ bool one_byte = special->HasOnlyOneByteChars();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -6422,7 +6576,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (one_byte && !element->IsOneByteConvertible()) {
+ if (one_byte && !element->HasOnlyOneByteChars()) {
one_byte = false;
}
} else {
@@ -7570,20 +7724,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
}
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler(isolate);
- Handle<Code> code = compiler.CompileConstructStub(function);
- function->shared()->set_construct_stub(*code);
- }
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -7647,13 +7787,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
shared->CompleteInobjectSlackTracking();
}
- bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = isolate->factory()->NewJSObject(function);
RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -7668,7 +7803,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
return isolate->heap()->undefined_value();
}
@@ -10050,14 +10184,18 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
return heap->undefined_value();
}
return value;
- case FIELD:
- value =
+ case FIELD: {
+ Object* value;
+ MaybeObject* maybe_value =
JSObject::cast(result->holder())->FastPropertyAt(
+ result->representation(),
result->GetFieldIndex().field_index());
+ if (!maybe_value->To(&value)) return maybe_value;
if (value->IsTheHole()) {
return heap->undefined_value();
}
return value;
+ }
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS: {
@@ -10144,7 +10282,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
}
}
details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+ details->set(
+ 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -13333,7 +13472,7 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
{ MaybeObject* maybe_dictionary = name_dictionary->Add(
String::cast(name_string),
Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
+ PropertyDetails(NONE, NORMAL, Representation::None()));
if (!maybe_dictionary->ToObject(&dictionary)) {
// Non-recoverable failure. Calling code must restart heap
// initialization.
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 2a102e12f..c91fee6ec 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -231,6 +231,7 @@ namespace internal {
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionIsGenerator, 1, 1) \
F(FunctionBindArguments, 4, 1) \
F(BoundFunctionGetBindings, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
@@ -360,6 +361,7 @@ namespace internal {
F(TypedArrayGetByteLength, 1, 1) \
F(TypedArrayGetByteOffset, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
+ F(TypedArraySetFastCases, 3, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index e271470bd..da186b6ce 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -26,7 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
- || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__)
+ || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
+ || defined(__native_client__)
#define USE_SIGNALS
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 5ad970ad8..208dc76ac 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -727,8 +727,9 @@ int Scope::ContextChainLength(Scope* scope) {
for (Scope* s = this; s != scope; s = s->outer_scope_) {
ASSERT(s != NULL); // scope must be in the scope chain
if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
- // Catch scopes always have heap slots.
+ // Catch and module scopes always have heap slots.
ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0);
+ ASSERT(!s->is_module_scope() || s->num_heap_slots() > 0);
}
return n;
}
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index d4f31c1e1..3e70edc59 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -558,6 +558,11 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
58,
"Runtime::AllocateInOldPointerSpace");
+ Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
+ address(),
+ UNCLASSIFIED,
+ 59,
+ "Heap::NewSpaceAllocationLimitAddress");
// Add a small set of deopt entry addresses to the encoder without generating
// deopt table code, which isn't possible at deserialization time.
@@ -568,7 +573,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 59 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 60 + entry, "lazy_deopt");
}
}
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index df1c3ef12..099ad93a1 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -2304,14 +2304,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Don't free list allocate if there is linear space available.
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
- int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) return NULL;
-
-
- int bytes_left = new_node_size - size_in_bytes;
- ASSERT(bytes_left >= 0);
-
int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
@@ -2321,6 +2313,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->heap()->incremental_marking()->OldSpaceStep(
size_in_bytes - old_linear_size);
+ int new_node_size = 0;
+ FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == NULL) {
+ owner_->SetTop(NULL, NULL);
+ return NULL;
+ }
+
+ int bytes_left = new_node_size - size_in_bytes;
+ ASSERT(bytes_left >= 0);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 61eec0d69..ebe1b5b43 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -350,9 +350,8 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
}
int real_size = map->NumberOfOwnDescriptors();
DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.descriptor_index() > real_size) continue;
if (details.type() == FIELD) {
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
@@ -368,7 +367,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
key->ShortPrint();
}
Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+ Object* value = js_object->RawFastPropertyAt(descs->GetFieldIndex(i));
Add("%o\n", value);
}
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index f928cf616..f43c9acee 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -221,11 +221,12 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field) {
+ PropertyIndex field,
+ Representation representation) {
if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(LoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
return stub.GetCode(isolate());
}
@@ -236,7 +237,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
+ compiler.CompileLoadField(receiver, holder, name, field, representation);
JSObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -337,11 +338,12 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field) {
+ PropertyIndex field,
+ Representation representation) {
if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(KeyedLoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
return stub.GetCode(isolate());
}
@@ -352,7 +354,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
+ compiler.CompileLoadField(receiver, holder, name, field, representation);
JSObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -954,10 +956,11 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name) {
LoadStubCompiler ic_compiler(isolate_);
- Code::StubType type = handlers->length() == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
+ Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
+ : Code::NORMAL;
Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
receiver_maps, handlers, name, type, PROPERTY);
return ic;
@@ -1042,45 +1045,40 @@ void StubCache::Clear() {
void StubCache::CollectMatchingMaps(SmallMapList* types,
- Name* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == name) {
+ if (primary_[i].key == *name) {
Map* map = primary_[i].map;
// Map can be NULL if the stub is a constant function call
// with a primitive receiver.
if (map == NULL) continue;
- int offset = PrimaryOffset(name, flags, map);
+ int offset = PrimaryOffset(*name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == name) {
+ if (secondary_[i].key == *name) {
Map* map = secondary_[i].map;
// Map can be NULL if the stub is a constant function call
// with a primitive receiver.
if (map == NULL) continue;
// Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary_entry = entry(primary_, primary_offset);
- if (primary_entry->key == name) {
- Map* primary_map = primary_entry->map;
- if (map == primary_map) continue;
- }
+ int primary_offset = PrimaryOffset(*name, flags, map);
// Lookup in secondary table and add matches.
- int offset = SecondaryOffset(name, flags, primary_offset);
+ int offset = SecondaryOffset(*name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
@@ -1500,16 +1498,17 @@ Register BaseLoadStubCompiler::HandlerFrontend(Handle<JSObject> object,
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- PropertyIndex field) {
+Handle<Code> BaseLoadStubCompiler::CompileLoadField(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ PropertyIndex field,
+ Representation representation) {
Label miss;
Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
- LoadFieldStub stub(reg, field.is_inobject(holder), field.translate(holder));
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateLoadField(reg, holder, field, representation);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
@@ -1519,19 +1518,6 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object,
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1594,17 +1580,16 @@ void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
if (lookup->IsField()) {
PropertyIndex field = lookup->GetFieldIndex();
if (interceptor_holder.is_identical_to(holder)) {
- LoadFieldStub stub(interceptor_reg,
- field.is_inobject(holder),
- field.translate(holder));
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateLoadField(
+ interceptor_reg, holder, field, lookup->representation());
} else {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
Register reg = HandlerFrontend(
interceptor_holder, interceptor_reg, holder, name, &success);
__ bind(&success);
- GenerateLoadField(reg, holder, field);
+ GenerateLoadField(
+ reg, holder, field, lookup->representation());
}
} else {
// We found CALLBACKS property in prototype chain of interceptor's
@@ -1656,7 +1641,7 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name) {
- Label miss, miss_restore_name;
+ Label miss, miss_restore_name, slow;
GenerateNameCheck(name, this->name(), &miss);
@@ -1666,15 +1651,19 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
transition,
name,
receiver(), this->name(), value(),
- scratch1(), scratch2(),
+ scratch1(), scratch2(), scratch3(),
&miss,
- &miss_restore_name);
+ &miss_restore_name,
+ &slow);
// Handle store cache miss.
GenerateRestoreName(masm(), &miss_restore_name, name);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ GenerateRestoreName(masm(), &slow, name);
+ TailCallBuiltin(masm(), SlowBuiltin(kind()));
+
// Return the generated code.
return GetICCode(kind(), Code::MAP_TRANSITION, name);
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index dbb5e90f2..cbaeacee8 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -112,7 +112,8 @@ class StubCache {
Handle<Code> ComputeLoadField(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ PropertyIndex field_index,
+ Representation representation);
Handle<Code> ComputeLoadCallback(Handle<Name> name,
Handle<JSObject> object,
@@ -147,7 +148,8 @@ class StubCache {
Handle<Code> ComputeKeyedLoadField(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ PropertyIndex field_index,
+ Representation representation);
Handle<Code> ComputeKeyedLoadCallback(
Handle<Name> name,
@@ -291,6 +293,7 @@ class StubCache {
Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
@@ -310,7 +313,7 @@ class StubCache {
// Collect all maps that match the name and flags.
void CollectMatchingMaps(SmallMapList* types,
- Name* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone);
@@ -505,13 +508,9 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- PropertyIndex index);
- static void DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index);
+ bool inobject,
+ int index,
+ Representation representation);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -541,8 +540,10 @@ class StubCompiler BASE_EMBEDDED {
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name);
+ Label* miss_restore_name,
+ Label* slow);
void GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
@@ -564,6 +565,14 @@ class StubCompiler BASE_EMBEDDED {
}
return Builtins::kLoadIC_Miss;
}
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC: return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
+ default: UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -642,7 +651,8 @@ class BaseLoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
- PropertyIndex index);
+ PropertyIndex index,
+ Representation representation);
Handle<Code> CompileLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
@@ -694,7 +704,8 @@ class BaseLoadStubCompiler: public StubCompiler {
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index);
+ PropertyIndex field,
+ Representation representation);
void GenerateLoadConstant(Handle<JSFunction> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
index 36c11e6a1..29ea3eacd 100644
--- a/deps/v8/src/third_party/vtune/v8-vtune.h
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -60,7 +60,7 @@
namespace vTune {
-void InitilizeVtuneForV8();
+void InitializeVtuneForV8();
} // namespace vTune
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 6ff595fdf..d3f7a68f4 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -194,8 +194,8 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
if ((*script->GetScriptName())->IsString()) {
Handle<String> script_name =
Handle<String>(String::Cast(*script->GetScriptName()));
- temp_file_name = new char[script_name->Length() + 1];
- script_name->WriteAscii(temp_file_name);
+ temp_file_name = new char[script_name->Utf8Length() + 1];
+ script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
}
@@ -267,7 +267,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
} // namespace internal
-void InitilizeVtuneForV8() {
+void InitializeVtuneForV8() {
if (v8::V8::Initialize()) {
v8::V8::SetFlagsFromString("--nocompact_code_space",
(int)strlen("--nocompact_code_space"));
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index adbe6a1b3..df53178dd 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -135,6 +135,7 @@ MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) {
}
}
result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ result->set_back_pointer_storage(back_pointer_storage());
return result;
}
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 3bc509a61..1757bee76 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -67,7 +67,7 @@ TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
isolate_(isolate),
zone_(zone) {
BuildDictionary(code);
- ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
+ ASSERT(dictionary_->IsDictionary());
}
@@ -539,15 +539,6 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
- Zone* zone) {
- for (int i = 0; i < list->length(); ++i) {
- if (list->at(i).is_identical_to(map)) return;
- }
- list->Add(map, zone);
-}
-
-
void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
SmallMapList* types) {
MapHandleList maps;
@@ -556,7 +547,7 @@ void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
for (int i = 0; i < maps.length(); i++) {
Handle<Map> map(maps.at(i));
if (!CanRetainOtherContext(*map, *native_context_)) {
- AddMapIfMissing(map, types, zone());
+ types->AddMapIfMissing(map, zone());
}
}
}
@@ -574,7 +565,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
} else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object), zone());
+ types->AddMapIfMissing(Handle<Map>::cast(object), zone());
} else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) {
CollectPolymorphicMaps(Handle<Code>::cast(object), types);
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
@@ -582,7 +573,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
types->Reserve(4, zone());
ASSERT(object->IsCode());
isolate_->stub_cache()->CollectMatchingMaps(types,
- *name,
+ name,
flags,
native_context_,
zone());
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 24fcf1e45..4fade00e1 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -31,96 +31,73 @@
// in runtime.js:
// var $Array = global.Array;
-var $ArrayBuffer = global.__ArrayBuffer;
-// -------------------------------------------------------------------
-function ArrayBufferConstructor(byteLength) { // length = 1
- if (%_IsConstructCall()) {
- var l = TO_POSITIVE_INTEGER(byteLength);
- %ArrayBufferInitialize(this, l);
- } else {
- return new $ArrayBuffer(byteLength);
- }
-}
+// --------------- Typed Arrays ---------------------
-function ArrayBufferGetByteLength() {
- if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['ArrayBuffer.prototype.byteLength', this]);
- }
- return %ArrayBufferGetByteLength(this);
-}
+function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
+ function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : TO_POSITIVE_INTEGER(byteOffset);
-// ES6 Draft 15.13.5.5.3
-function ArrayBufferSlice(start, end) {
- if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['ArrayBuffer.prototype.slice', this]);
- }
+ if (offset % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "start offset", name, elementSize);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
- var relativeStart = TO_INTEGER(start);
- var first;
- if (relativeStart < 0) {
- first = MathMax(this.byteLength + relativeStart, 0);
- } else {
- first = MathMin(relativeStart, this.byteLength);
- }
- var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
- var fin;
- if (relativeEnd < 0) {
- fin = MathMax(this.byteLength + relativeEnd, 0);
- } else {
- fin = MathMin(relativeEnd, this.byteLength);
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "byte length", name, elementSize);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / elementSize;
+ } else {
+ newLength = TO_POSITIVE_INTEGER(length);
+ newByteLength = newLength * elementSize;
+ }
+ if (offset + newByteLength > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
}
- var newLen = fin - first;
- // TODO(dslomov): implement inheritance
- var result = new $ArrayBuffer(newLen);
-
- %ArrayBufferSliceImpl(this, result, first);
- return result;
-}
+ function ConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var byteLength = l * elementSize;
+ var buffer = new global.ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ }
-// --------------- Typed Arrays ---------------------
+ function ConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var byteLength = l * elementSize;
+ var buffer = new $ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ for (var i = 0; i < l; i++) {
+ obj[i] = arrayLike[i];
+ }
+ }
-function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
- return function (buffer, byteOffset, length) {
+ return function (arg1, arg2, arg3) {
if (%_IsConstructCall()) {
- if (!IS_ARRAYBUFFER(buffer)) {
- throw MakeTypeError("Type error!");
- }
- var offset = IS_UNDEFINED(byteOffset)
- ? 0 : offset = TO_POSITIVE_INTEGER(byteOffset);
-
- if (offset % elementSize !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", name, elementSize);
- }
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- if (offset >= bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_offset");
- }
-
- var newByteLength;
- var newLength;
- if (IS_UNDEFINED(length)) {
- if (bufferByteLength % elementSize !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", name, elementSize);
- }
- newByteLength = bufferByteLength - offset;
- newLength = newByteLength / elementSize;
+ if (IS_ARRAYBUFFER(arg1)) {
+ ConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) || IS_BOOLEAN(arg1)) {
+ ConstructByLength(this, arg1);
+ } else if (!IS_UNDEFINED(arg1)) {
+ ConstructByArrayLike(this, arg1);
} else {
- var newLength = TO_POSITIVE_INTEGER(length);
- newByteLength = newLength * elementSize;
- }
- if (newByteLength > bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_length");
+ throw MakeTypeError("parameterless_typed_array_constr", name);
}
- %TypedArrayInitialize(this, arrayId, buffer, offset, newByteLength);
} else {
- return new constructor(buffer, byteOffset, length);
+ return new constructor(arg1, arg2, arg3);
}
}
}
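Editor's note: the rewritten closure dispatches on the first argument's type instead of requiring an ArrayBuffer. Roughly, under this patch (the constructors are still flag-gated in this V8 version):

    var buf = new ArrayBuffer(8);
    new Float64Array(buf);      // ConstructByArrayBuffer: one 8-byte element
    new Int32Array(buf, 4, 1);  // explicit byteOffset and length
    new Int16Array(4);          // ConstructByLength: four zeroed elements
    new Int16Array([1, 2, 3]);  // ConstructByArrayLike: copies the values
    // new Int32Array(buf, 2);  // would throw RangeError: misaligned offset
    // new Int16Array();        // would throw TypeError: parameterless constructor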
@@ -141,32 +118,57 @@ function TypedArrayGetLength() {
return %TypedArrayGetLength(this);
}
+function CreateSubArray(elementSize, constructor) {
+ return function(begin, end) {
+ var srcLength = %TypedArrayGetLength(this);
+ var beginInt = TO_INTEGER(begin);
+ if (beginInt < 0) {
+ beginInt = MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = MathMin(srcLength, beginInt);
+ }
-// -------------------------------------------------------------------
-
-function SetUpArrayBuffer() {
- %CheckIsBootstrapping();
-
- // Set up the ArrayBuffer constructor function.
- %SetCode($ArrayBuffer, ArrayBufferConstructor);
- %FunctionSetPrototype($ArrayBuffer, new $Object());
-
- // Set up the constructor property on the ArrayBuffer prototype object.
- %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
+ var endInt = IS_UNDEFINED(end) ? srcLength : TO_INTEGER(end);
+ if (endInt < 0) {
+ endInt = MathMax(0, srcLength + endInt);
+ } else {
+ endInt = MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %TypedArrayGetByteOffset(this) + beginInt * elementSize;
+ return new constructor(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
+ }
+}
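Editor's note: CreateSubArray clamps begin/end the way Array.prototype.slice does and returns a new view over the same backing buffer. For instance:

    var a = new Int32Array([10, 20, 30, 40]);
    var mid = a.subarray(1, -1);  // negative indices count from the end
    mid[0] = 99;                  // writes through: a is now [10, 99, 30, 40]
    a.subarray(3, 1).length;      // 0 -- end is clamped up to begin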
- InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+function TypedArraySet(obj, offset) {
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_POSITIVE_INTEGER(offset);
+ if (%TypedArraySetFastCases(this, obj, intOffset))
+ return;
- InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
- "slice", ArrayBufferSlice
- ));
+ var l = obj.length;
+ if (IS_UNDEFINED(l)) {
+ throw MakeTypeError("invalid_argument");
+ }
+ if (intOffset + l > this.length) {
+ throw MakeRangeError("typed_array_set_source_too_large");
+ }
+ for (var i = 0; i < l; i++) {
+ this[intOffset + i] = obj[i];
+ }
}
-SetUpArrayBuffer();
+// -------------------------------------------------------------------
function SetupTypedArray(arrayId, name, constructor, elementSize) {
- var f = CreateTypedArrayConstructor(name, elementSize,
- arrayId, constructor);
- %SetCode(constructor, f);
+ %CheckIsBootstrapping();
+ var fun = CreateTypedArrayConstructor(name, elementSize,
+ arrayId, constructor);
+ %SetCode(constructor, fun);
%FunctionSetPrototype(constructor, new $Object());
%SetProperty(constructor.prototype,
@@ -178,15 +180,20 @@ function SetupTypedArray(arrayId, name, constructor, elementSize) {
InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset);
InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength);
InstallGetter(constructor.prototype, "length", TypedArrayGetLength);
+
+ InstallFunctions(constructor.prototype, DONT_ENUM, $Array(
+ "subarray", CreateSubArray(elementSize, constructor),
+ "set", TypedArraySet
+ ));
}
// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-SetupTypedArray(1, "Uint8Array", global.__Uint8Array, 1);
-SetupTypedArray(2, "Int8Array", global.__Int8Array, 1);
-SetupTypedArray(3, "Uint16Array", global.__Uint16Array, 2);
-SetupTypedArray(4, "Int16Array", global.__Int16Array, 2);
-SetupTypedArray(5, "Uint32Array", global.__Uint32Array, 4);
-SetupTypedArray(6, "Int32Array", global.__Int32Array, 4);
-SetupTypedArray(7, "Float32Array", global.__Float32Array, 4);
-SetupTypedArray(8, "Float64Array", global.__Float64Array, 8);
-
+SetupTypedArray(1, "Uint8Array", global.Uint8Array, 1);
+SetupTypedArray(2, "Int8Array", global.Int8Array, 1);
+SetupTypedArray(3, "Uint16Array", global.Uint16Array, 2);
+SetupTypedArray(4, "Int16Array", global.Int16Array, 2);
+SetupTypedArray(5, "Uint32Array", global.Uint32Array, 4);
+SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
+SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
+SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
+SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
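Editor's note: arrayId 9 registers Uint8ClampedArray alongside the other views, all now taken from the unprefixed globals. Unlike Uint8Array, its stores clamp rather than wrap:

    var c = new Uint8ClampedArray([-10, 260, 3]);
    // c is [0, 255, 3]; a plain Uint8Array would wrap these to [246, 4, 3]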
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index b041fc2ba..5ead877e5 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -48,6 +48,9 @@
#error both DEBUG and NDEBUG are set
#endif
+// TODO(dcarney): remove this
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+
// Basic includes
#include "../include/v8.h"
#include "v8globals.h"
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index f71de8207..c72ce7ab7 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -64,6 +64,14 @@ class Memory {
return *reinterpret_cast<unsigned*>(addr);
}
+ static intptr_t& intptr_at(Address addr) {
+ return *reinterpret_cast<intptr_t*>(addr);
+ }
+
+ static uintptr_t& uintptr_at(Address addr) {
+ return *reinterpret_cast<uintptr_t*>(addr);
+ }
+
static double& double_at(Address addr) {
return *reinterpret_cast<double*>(addr);
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 83b561859..9266af6f0 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1665,7 +1665,6 @@ function FunctionSourceString(func) {
func = %GetCallTrap(func);
}
- // TODO(wingo): Print source using function* for generators.
if (!IS_FUNCTION(func)) {
throw new $TypeError('Function.prototype.toString is not generic');
}
@@ -1684,7 +1683,8 @@ function FunctionSourceString(func) {
var name = %FunctionNameShouldPrintAsAnonymous(func)
? 'anonymous'
: %FunctionGetName(func);
- return 'function ' + name + source;
+ var head = %FunctionIsGenerator(func) ? 'function* ' : 'function ';
+ return head + name + source;
}
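Editor's note: together with the %FunctionIsGenerator runtime function added earlier in this diff, source reconstruction now picks the right keyword. Assuming --harmony-generators:

    function* fib() { yield 1; }
    fib.toString();                // "function* fib() { yield 1; }"
    (function f() {}).toString();  // unchanged: "function f() {}"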
@@ -1766,7 +1766,7 @@ function NewFunction(arg1) { // length == 1
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
+ if (p.indexOf(')') != -1) throw MakeSyntaxError('paren_in_arg_string',[]);
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index dac2bf01b..baafcf7f3 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 18
-#define BUILD_NUMBER 4
+#define MINOR_VERSION 19
+#define BUILD_NUMBER 0
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 5bb1292f2..aec50c911 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -202,16 +202,16 @@ const Register no_reg = { kRegister_no_reg_Code };
#ifdef _WIN64
// Windows calling convention
- const Register arg_reg_1 = rcx;
- const Register arg_reg_2 = rdx;
- const Register arg_reg_3 = r8;
- const Register arg_reg_4 = r9;
+ const Register arg_reg_1 = { kRegister_rcx_Code };
+ const Register arg_reg_2 = { kRegister_rdx_Code };
+ const Register arg_reg_3 = { kRegister_r8_Code };
+ const Register arg_reg_4 = { kRegister_r9_Code };
#else
// AMD64 calling convention
- const Register arg_reg_1 = rdi;
- const Register arg_reg_2 = rsi;
- const Register arg_reg_3 = rdx;
- const Register arg_reg_4 = rcx;
+ const Register arg_reg_1 = { kRegister_rdi_Code };
+ const Register arg_reg_2 = { kRegister_rsi_Code };
+ const Register arg_reg_3 = { kRegister_rdx_Code };
+ const Register arg_reg_4 = { kRegister_rcx_Code };
#endif // _WIN64
struct XMMRegister {
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 3a9a0234e..f7ded184e 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -74,6 +75,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -102,9 +125,10 @@ static void InitializeArrayConstructorDescriptor(
int constant_stack_parameter_count) {
// register state
// rax -- number of arguments
+ // rdi -- function
// rbx -- type info cell with elements kind
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rdi, rbx };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &rax;
@@ -2391,12 +2415,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
}
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -3818,6 +3836,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(rcx, &miss);
__ Cmp(rcx, terminal_kind_sentinel);
__ j(above, &miss);
// Make sure the function is the Array() function
@@ -4106,22 +4125,23 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Call C function.
#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
+ // Pass argv and argc as two parameters. The arguments object will
+ // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(rcx, r14); // argc.
+ __ movq(rdx, r15); // argv.
+ __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(rdx, r14); // argc.
+ __ movq(r8, r15); // argv.
+ __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
@@ -4906,8 +4926,34 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ Load(rbx, high_promotion_mode);
+ __ testb(rbx, Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ RecordWriteField(rcx,
+ ConsString::kFirstOffset,
+ rax,
+ rbx,
+ kDontSaveFPRegs);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ RecordWriteField(rcx,
+ ConsString::kSecondOffset,
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+
+ __ bind(&after_writing);
+
__ movq(rax, rcx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -6346,8 +6392,11 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate
+ // FastNewClosureStub::Generate and
+ // StringAddStub::Generate
{ REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -6900,14 +6949,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, kPointerSize));
-
- // There is no info if the call site went megamorphic either
-
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ Cmp(rdx, TypeFeedbackCells::MegamorphicSentinel(masm->isolate()));
- __ j(equal, &no_info);
+ __ movq(rdx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(rdx, &no_info);
__ SmiToInteger32(rdx, rdx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 1b29e58d5..750d92926 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -233,6 +233,15 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for IC call (from ic-x64.cc)
// ----------- S t a t e -------------
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index a20d468ba..19fa0aadd 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1554,7 +1554,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
@@ -1924,11 +1925,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ j(not_equal, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1940,18 +1942,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Move(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2058,6 +2049,56 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ Move(rbx, map);
+ __ pop(rcx);
+ __ Move(rdx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ Move(FieldOperand(rax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ rcx);
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ rdx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(rax, JSGeneratorObject::kResultValuePropertyOffset,
+ rcx, rdx, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
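Editor's note: EmitReturnIteratorResult allocates the iterator result object inline (falling back to a runtime allocation when new space is exhausted) and write-barriers only the value field, since the map, the empty arrays, and the booleans live in the root set. The JS-visible effect, hedged:

    function* g() { yield 1; }
    var it = g();
    it.next();  // now an object like { value: 1, done: false }
    it.next();  // { value: undefined, done: true } once the generator closes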
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 0a9ceaa86..761e05ae4 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1528,6 +1528,26 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index fbb7c2897..f908ea1ff 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -96,6 +96,10 @@ void LCodeGen::FinishCode(Handle<Code> code) {
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1033,7 +1037,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ andl(dividend, Immediate(divisor - 1));
__ bind(&done);
} else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Label done, remainder_eq_dividend, slow, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
@@ -1069,23 +1073,10 @@ void LCodeGen::DoModI(LModI* instr) {
__ movl(scratch, right_reg);
__ subl(scratch, Immediate(1));
__ testl(scratch, right_reg);
- __ j(not_zero, &do_subtraction, Label::kNear);
+ __ j(not_zero, &slow, Label::kNear);
__ andl(left_reg, scratch);
__ jmp(&remainder_eq_dividend, Label::kNear);
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ movl(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ subl(left_reg, right_reg);
- // Check if the dividend is less than the divisor.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ movl(left_reg, scratch);
-
// Slow case, using idiv instruction.
__ bind(&slow);
@@ -1683,19 +1674,27 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
} else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ addl(ToRegister(left), ToRegister(right));
+ } else {
+ __ addl(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
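DoAddI now has two shapes: the destructive two-operand addl when the result aliases the left input, and a non-destructive lea when it does not. lea computes left + right into a third register without modifying either input, but it sets no flags, which is why this path is only taken when no overflow deopt is required. Ordinary C++ showing the non-destructive pattern (an optimizing x64 compiler will typically emit lea for the sum):

    #include <cstdint>

    int32_t AddPreservingInputs(int32_t left, int32_t right, int32_t* left_out) {
      int32_t result = left + right;  // lea result, [left + right]
      *left_out = left;               // left is still live and unchanged
      return result;
    }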
@@ -2677,13 +2676,21 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
+ return;
+ }
+
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+ __ movq(result, FieldOperand(object, offset));
} else {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+ __ movq(result, FieldOperand(result, offset));
}
}
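With field representations tracked, DoLoadNamedField has three load shapes: a raw movsd into an XMM register for double fields, and the two pre-existing tagged loads, either straight off the object or via the out-of-object properties array. A hedged structural model of the tagged cases:

    #include <cstdint>

    struct JSObjectModel {
      double double_field;      // in-object double payload (movsd path)
      intptr_t inobject_field;  // tagged in-object slot
      intptr_t* properties;     // out-of-object backing store
    };

    intptr_t LoadTaggedField(const JSObjectModel& obj, bool in_object, int index) {
      return in_object ? obj.inobject_field : obj.properties[index];
    }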
@@ -2847,41 +2854,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Immediate(Map::kElementsKindMask));
- __ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3904,8 +3876,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Set(rax, instr->arity());
__ Move(rbx, instr->hydrogen()->property_cell());
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -3932,16 +3903,42 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32Constant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Integer32ToSmi(value, value);
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
+ return;
+ }
+
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
+ __ Move(kScratchRegister, transition);
__ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@@ -3977,6 +3974,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
@@ -4384,6 +4382,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
__ j(above, deferred->entry());
+ __ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ movq(result, FieldOperand(result,
char_code, times_pointer_size,
@@ -4768,6 +4767,8 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5122,17 +5123,24 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, 0);
+ __ Move(result, Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ Integer32ToSmi(size, size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
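The runtime allocator takes its size as a smi, so a register-held size is tagged with Integer32ToSmi while a constant size is pushed already tagged via Smi::FromInt. Assuming this port's smi layout (32-bit payload in the upper half of the word, low tag bit clear), tagging is a single shift:

    #include <cstdint>

    // Assumed x64 smi encoding; illustrative, not the V8 API.
    int64_t SmiFromInt(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // Integer32ToSmi / Smi::FromInt
    }

    int32_t SmiValue(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }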
@@ -5219,7 +5227,8 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(constant_properties);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 887c78895..aa0ab9c0d 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -371,6 +372,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 6707455ef..f49f7d67f 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -557,6 +557,11 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -680,7 +685,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -777,8 +782,8 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -1304,8 +1309,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1468,8 +1473,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LMulI* mul = new(zone()) LMulI(left, right);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1508,13 +1513,24 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
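The comment above gives the rationale; LAddI::UseLea (declared in lithium-x64.h later in this diff) encodes it. A one-function model of the decision under those stated conditions:

    // Use lea when the add cannot overflow (lea sets no flags) and the
    // preferred left input has other uses worth preserving.
    bool UseLeaModel(bool can_overflow, int better_left_use_count) {
      return !can_overflow && better_left_use_count > 1;
    }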
@@ -1534,8 +1550,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2022,7 +2038,6 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2059,12 +2074,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2115,19 +2124,6 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
-// use of a constant instead of a register.
-static bool StoreConstantValueAllowed(HValue* value) {
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- return constant_value->HasSmiValue()
- || constant_value->HasDoubleValue()
- || constant_value->ImmortalImmovable();
- }
- return false;
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = instr->key()->representation().IsTagged();
@@ -2151,18 +2147,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
- if (StoreConstantValueAllowed(instr->value())) {
- val = UseRegisterOrConstantAtStart(instr->value());
- } else {
- val = UseRegisterAtStart(instr->value());
- }
+ val = UseRegisterOrConstantAtStart(instr->value());
if (clobbers_key) {
key = UseTempRegister(instr->key());
- } else if (StoreConstantValueAllowed(instr->key())) {
- key = UseRegisterOrConstantAtStart(instr->key());
} else {
- key = UseRegisterAtStart(instr->key());
+ key = UseRegisterOrConstantAtStart(instr->key());
}
}
}
@@ -2258,11 +2248,20 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
: UseRegisterAtStart(instr->object());
}
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
- } else if (StoreConstantValueAllowed(instr->value())) {
+ } else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2272,7 +2271,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2323,7 +2326,9 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp);
return AssignPointerMap(DefineAsRegister(result));
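UseConstant, added earlier in this file, lets a constant allocation size be encoded as an immediate instead of tying up a temp register that DoDeferredAllocate would then clobber with Integer32ToSmi. A sketch of the selection (labels are illustrative):

    struct SizeModel { bool is_constant; };

    const char* PickSizeOperand(const SizeModel& size) {
      return size.is_constant
          ? "UseConstant"       // immediate baked into the instruction stream
          : "UseTempRegister";  // will be tagged in place by Integer32ToSmi
    }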
@@ -2384,7 +2389,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 9154b04cf..d1f7e7608 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -123,7 +123,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1346,6 +1345,11 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1512,18 +1516,6 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
@@ -2063,6 +2055,9 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2674,6 +2669,9 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+  // An input operand that is a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 76491a331..691894c0e 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -4066,9 +4066,33 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ Load(scratch1, high_promotion_mode);
+ testb(scratch1, Immediate(1));
+ j(zero, &allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
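AllocateAsciiConsString now consults the isolate's high-promotion-mode flag: when new-space objects are being promoted at a high rate anyway, cons strings are pretenured directly into old pointer space. The decision, restated as a hedged model:

    enum Space { NEW_SPACE, OLD_POINTER_SPACE };

    Space PickConsStringSpace(bool high_promotion_mode_active) {
      // testb(scratch1, Immediate(1)); j(zero, &allocate_new_space);
      return high_promotion_mode_active ? OLD_POINTER_SPACE : NEW_SPACE;
    }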
@@ -4414,6 +4438,19 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Move(scratch, map);
+ movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ SmiToInteger32(scratch, scratch);
+ and_(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register bitmap_scratch,
Register mask_scratch,
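CheckMapDeprecated emits nothing for maps that can never be deprecated; otherwise it loads the map's bit_field3 smi, untags it, and masks out the Deprecated bit. The predicate being tested, with the mask treated as an opaque parameter (whatever Map::Deprecated::kMask encodes) and the same assumed smi layout as above:

    #include <cstdint>

    bool MapIsDeprecated(int64_t bit_field3_smi, int32_t deprecated_mask) {
      int32_t bits = static_cast<int32_t>(bit_field3_smi >> 32);  // SmiToInteger32
      return (bits & deprecated_mask) != 0;                       // j(not_zero, ...)
    }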
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index f640beb3e..76941ffba 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -168,6 +168,10 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 54d2a113a..a7faf9b66 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -343,11 +343,13 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -745,8 +747,10 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register value_reg,
Register scratch1,
Register scratch2,
+ Register unused,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
@@ -756,6 +760,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -771,7 +784,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -790,6 +803,28 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch1, value_reg);
+ __ cvtlsi2sd(xmm0, scratch1);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
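For a transition to a double field, the stub boxes the incoming value before switching the map: it allocates a fresh HeapNumber in storage_reg, converts a smi with cvtlsi2sd or copies the payload of an existing heap number, and stores the double into the box. Modeled in C++, with false standing in for the jump to miss_restore_name:

    #include <cstdint>

    struct TaggedValue {
      bool is_smi;
      bool is_heap_number;
      int32_t smi_value;
      double number_value;
    };

    bool NormalizeToFieldDouble(const TaggedValue& value, double* storage) {
      if (value.is_smi) {
        *storage = static_cast<double>(value.smi_value);  // cvtlsi2sd
        return true;
      }
      if (!value.is_heap_number) return false;  // CheckMap -> miss_restore_name
      *storage = value.number_value;            // movsd from kValueOffset
      return true;
    }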
@@ -815,12 +850,11 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, transition);
__ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -837,25 +871,45 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(scratch1, offset), value_reg);
+ }
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
@@ -895,17 +949,55 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ movq(scratch1,
+ FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch2, value_reg);
+ __ cvtlsi2sd(xmm0, scratch2);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ // Return the value (register rax).
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
+ return;
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movq(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -913,11 +1005,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch1, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
@@ -1157,10 +1251,20 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1415,7 +1519,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
name, &miss);
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(rdi, &miss);
@@ -2807,18 +2912,24 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Register map_reg = scratch1();
__ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ }
}
+ ASSERT(number_of_handled_maps > 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}