summaryrefslogtreecommitdiff
path: root/deps/v8/test/cctest
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/cctest')
-rw-r--r--deps/v8/test/cctest/cctest.gyp15
-rw-r--r--deps/v8/test/cctest/cctest.h74
-rw-r--r--deps/v8/test/cctest/cctest.status61
-rw-r--r--deps/v8/test/cctest/test-accessors.cc15
-rw-r--r--deps/v8/test/cctest/test-api.cc1339
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc21
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc10801
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc76
-rw-r--r--deps/v8/test/cctest/test-atomicops.cc276
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc189
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc24
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc33
-rw-r--r--deps/v8/test/cctest/test-compiler.cc5
-rw-r--r--deps/v8/test/cctest/test-constantpool.cc40
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc16
-rw-r--r--deps/v8/test/cctest/test-date.cc22
-rw-r--r--deps/v8/test/cctest/test-debug.cc144
-rw-r--r--deps/v8/test/cctest/test-decls.cc3
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc12
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc1763
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc15
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc110
-rw-r--r--deps/v8/test/cctest/test-fuzz-arm64.cc71
-rw-r--r--deps/v8/test/cctest/test-hashing.cc70
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc167
-rw-r--r--deps/v8/test/cctest/test-heap.cc375
-rw-r--r--deps/v8/test/cctest/test-javascript-arm64.cc266
-rw-r--r--deps/v8/test/cctest/test-js-arm64-variables.cc143
-rw-r--r--deps/v8/test/cctest/test-log.cc4
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-ia32.cc28
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc42
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc140
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc10
-rw-r--r--deps/v8/test/cctest/test-mementos.cc74
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc135
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc194
-rw-r--r--deps/v8/test/cctest/test-parsing.cc737
-rw-r--r--deps/v8/test/cctest/test-platform.cc6
-rw-r--r--deps/v8/test/cctest/test-regexp.cc35
-rw-r--r--deps/v8/test/cctest/test-strings.cc97
-rw-r--r--deps/v8/test/cctest/test-symbols.cc2
-rw-r--r--deps/v8/test/cctest/test-types.cc247
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc425
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h233
-rw-r--r--deps/v8/test/cctest/testcfg.py6
46 files changed, 17283 insertions, 1280 deletions
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 996db3eea..ec5b08dd2 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -53,6 +53,7 @@
'test-alloc.cc',
'test-api.cc',
'test-ast.cc',
+ 'test-atomicops.cc',
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
@@ -88,6 +89,7 @@
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
+ 'test-microtask-delivery.cc',
'test-mark-compact.cc',
'test-mementos.cc',
'test-mutex.cc',
@@ -138,6 +140,7 @@
'test-code-stubs.cc',
'test-code-stubs-x64.cc',
'test-cpu-x64.cc',
+ 'test-disasm-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'
],
@@ -151,6 +154,18 @@
'test-macro-assembler-arm.cc'
],
}],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ 'test-utils-arm64.cc',
+ 'test-assembler-arm64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-arm64.cc',
+ 'test-disasm-arm64.cc',
+ 'test-fuzz-arm64.cc',
+ 'test-javascript-arm64.cc',
+ 'test-js-arm64-variables.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index d9f76294e..635983523 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -308,27 +308,89 @@ static inline v8::Local<v8::Script> v8_compile(const char* x) {
}
-// Helper function that compiles and runs the source.
+static inline v8::Local<v8::Script> v8_compile(v8::Local<v8::String> x) {
+ return v8::Script::Compile(x);
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(
+ v8::Local<v8::String> source, v8::Local<v8::String> origin_url) {
+ v8::ScriptOrigin origin(origin_url);
+ v8::ScriptCompiler::Source script_source(source, origin);
+ return v8::ScriptCompiler::Compile(
+ v8::Isolate::GetCurrent(), &script_source);
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(
+ v8::Local<v8::String> source, const char* origin_url) {
+ return CompileWithOrigin(source, v8_str(origin_url));
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(const char* source,
+ const char* origin_url) {
+ return CompileWithOrigin(v8_str(source), v8_str(origin_url));
+}
+
+
+// Helper functions that compile and run the source.
static inline v8::Local<v8::Value> CompileRun(const char* source) {
- return v8::Script::Compile(
- v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), source))->Run();
+ return v8::Script::Compile(v8_str(source))->Run();
}
-// Helper function that compiles and runs the source with given origin.
+static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
+ return v8::Script::Compile(source)->Run();
+}
+
+
+static inline v8::Local<v8::Value> PreCompileCompileRun(const char* source) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::String> source_string =
+ v8::String::NewFromUtf8(isolate, source);
+ v8::ScriptData* preparse = v8::ScriptData::PreCompile(source_string);
+ v8::ScriptCompiler::Source script_source(
+ source_string, new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(isolate, &script_source);
+ v8::Local<v8::Value> result = script->Run();
+ delete preparse;
+ return result;
+}
+
+
+// Helper functions that compile and run the source with given origin.
static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
const char* origin_url,
int line_number,
int column_number) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, origin_url),
+ v8::ScriptOrigin origin(v8_str(origin_url),
v8::Integer::New(isolate, line_number),
v8::Integer::New(isolate, column_number));
- return v8::Script::Compile(v8::String::NewFromUtf8(isolate, source), &origin)
+ v8::ScriptCompiler::Source script_source(v8_str(source), origin);
+ return v8::ScriptCompiler::Compile(isolate, &script_source)->Run();
+}
+
+
+static inline v8::Local<v8::Value> CompileRunWithOrigin(
+ v8::Local<v8::String> source, const char* origin_url) {
+ v8::ScriptCompiler::Source script_source(
+ source, v8::ScriptOrigin(v8_str(origin_url)));
+ return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &script_source)
->Run();
}
+static inline v8::Local<v8::Value> CompileRunWithOrigin(
+ const char* source, const char* origin_url) {
+ return CompileRunWithOrigin(v8_str(source), origin_url);
+}
+
+
// Pick a slightly different port to allow tests to be run in parallel.
static inline int FlagDependentPortOffset() {
return ::v8::internal::FLAG_crankshaft == false ? 100 :
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 721f1eb4f..2f09743e2 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -46,6 +46,10 @@
# This test always fails. It tests that LiveEdit causes abort when turned off.
'test-debug/LiveEditDisabled': [FAIL],
+ # This test always fails. It tests that DisallowJavascriptExecutionScope
+ # works as intended.
+ 'test-api/DisallowJavascriptExecutionScope': [FAIL],
+
# TODO(gc): Temporarily disabled in the GC branch.
'test-log/EquivalenceOfLoggingAndTraversal': [PASS, FAIL],
@@ -61,15 +65,53 @@
# are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
'test-parsing/ParserSync': [PASS, NO_VARIANTS],
+ # BUG(2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FLAKY],
+
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
'test-api/Threading2': [PASS, ['mode == debug', SLOW]],
'test-api/Threading3': [PASS, ['mode == debug', SLOW]],
'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
+ 'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
##############################################################################
+['arch == arm64', {
+
+ 'test-api/Bug618': [PASS],
+
+ # BUG(v8:2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
+
+ # BUG(v8:3154).
+ 'test-heap/ReleaseOverReservedPages': [PASS, ['mode == debug', FAIL]],
+
+ # BUG(v8:3155).
+ 'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
+}], # 'arch == arm64'
+
+['arch == arm64 and simulator_run == True', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/ExternalArrays': [PASS, TIMEOUT],
+ 'test-api/Threading1': [SKIP],
+}], # 'arch == arm64 and simulator_run == True'
+
+['arch == arm64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'test-api/ExternalDoubleArray': [SKIP],
+ 'test-api/ExternalFloat32Array': [SKIP],
+ 'test-api/ExternalFloat64Array': [SKIP],
+ 'test-api/ExternalFloatArray': [SKIP],
+ 'test-api/Float32Array': [SKIP],
+ 'test-api/Float64Array': [SKIP],
+ 'test-debug/DebugBreakLoop': [SKIP],
+}], # 'arch == arm64 and mode == debug and simulator_run == True'
+
+##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -77,6 +119,13 @@
}], # 'asan == True'
##############################################################################
+# This should be 'nosnap == True': issue 3216 to add 'nosnap'.
+[ALWAYS, {
+ # BUG(3215)
+ 'test-lockers/MultithreadedParallelIsolates': [PASS, FAIL],
+}], # 'nosnap == True'
+
+##############################################################################
['system == windows', {
# BUG(2999).
@@ -102,12 +151,6 @@
##############################################################################
['arch == arm', {
- # We cannot assume that we can throw OutOfMemory exceptions in all situations.
- # Apparently our ARM box is in such a state. Skip the test as it also runs for
- # a long time.
- 'test-api/OutOfMemory': [SKIP],
- 'test-api/OutOfMemoryNested': [SKIP],
-
# BUG(355): Test crashes on ARM.
'test-log/ProfLazyMode': [SKIP],
@@ -117,9 +160,6 @@
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
- # BUG(2999).
- 'test-cpu-profiler/CollectCpuProfile': [PASS, FLAKY],
-
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, SLOW],
@@ -183,5 +223,8 @@
# BUG(2998).
'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [SKIP],
+
+ # BUG(3150).
+ 'test-api/PreCompileInvalidPreparseDataError': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
]
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index bda09f01a..daafb244e 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -174,6 +174,7 @@ static void XSetter(Local<Value> value, const Info& info, int offset) {
CHECK_EQ(x_holder, info.This());
CHECK_EQ(x_holder, info.Holder());
x_register[offset] = value->Int32Value();
+ info.GetReturnValue().Set(v8_num(-1));
}
@@ -210,20 +211,20 @@ THREADED_TEST(AccessorIC) {
"var key_1 = 'x1';"
"for (var j = 0; j < 10; j++) {"
" var i = 4*j;"
- " holder.x0 = i;"
+ " result.push(holder.x0 = i);"
" result.push(obj.x0);"
- " holder.x1 = i + 1;"
+ " result.push(holder.x1 = i + 1);"
" result.push(obj.x1);"
- " holder[key_0] = i + 2;"
+ " result.push(holder[key_0] = i + 2);"
" result.push(obj[key_0]);"
- " holder[key_1] = i + 3;"
+ " result.push(holder[key_1] = i + 3);"
" result.push(obj[key_1]);"
"}"
"result"));
- CHECK_EQ(40, array->Length());
- for (int i = 0; i < 40; i++) {
+ CHECK_EQ(80, array->Length());
+ for (int i = 0; i < 80; i++) {
v8::Handle<Value> entry = array->Get(v8::Integer::New(isolate, i));
- CHECK_EQ(v8::Integer::New(isolate, i), entry);
+ CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
}
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index e58612705..5ee43d3e0 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -50,6 +50,7 @@
#include "unicode-inl.h"
#include "utils.h"
#include "vm-state.h"
+#include "../include/v8-util.h"
static const bool kLogThreading = false;
@@ -204,9 +205,8 @@ THREADED_TEST(Handles) {
CHECK(!undef.IsEmpty());
CHECK(undef->IsUndefined());
- const char* c_source = "1 + 2 + 3";
- Local<String> source = String::NewFromUtf8(CcTest::isolate(), c_source);
- Local<Script> script = Script::Compile(source);
+ const char* source = "1 + 2 + 3";
+ Local<Script> script = v8_compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
local_env->Exit();
@@ -445,9 +445,8 @@ THREADED_TEST(AccessElement) {
THREADED_TEST(Script) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- const char* c_source = "1 + 2 + 3";
- Local<String> source = String::NewFromUtf8(env->GetIsolate(), c_source);
- Local<Script> script = Script::Compile(source);
+ const char* source = "1 + 2 + 3";
+ Local<Script> script = v8_compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
}
@@ -526,7 +525,7 @@ THREADED_TEST(ScriptUsingStringResource) {
v8::HandleScope scope(env->GetIsolate());
TestResource* resource = new TestResource(two_byte_source, &dispose_count);
Local<String> source = String::NewExternal(env->GetIsolate(), resource);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -562,7 +561,7 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ASCII_ENCODING, encoding);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -594,7 +593,7 @@ THREADED_TEST(ScriptMakingExternalString) {
bool success = source->MakeExternal(new TestResource(two_byte_source,
&dispose_count));
CHECK(success);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -620,7 +619,7 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -764,7 +763,7 @@ THREADED_TEST(UsingExternalString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -784,7 +783,7 @@ THREADED_TEST(UsingExternalAsciiString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -871,7 +870,7 @@ TEST(ExternalStringWithDisposeHandling) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source = String::NewExternal(env->GetIsolate(), &res_stack);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -892,7 +891,7 @@ TEST(ExternalStringWithDisposeHandling) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source = String::NewExternal(env->GetIsolate(), res_heap);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -944,7 +943,7 @@ THREADED_TEST(StringConcat) {
env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
source = String::Concat(source, right);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(68, value->Int32Value());
@@ -2397,23 +2396,23 @@ THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
env->Global()->Set(v8_str("obj"), bottom);
// Indexed and named get.
- Script::Compile(v8_str("obj[0]"))->Run();
- Script::Compile(v8_str("obj.x"))->Run();
+ CompileRun("obj[0]");
+ CompileRun("obj.x");
// Indexed and named set.
- Script::Compile(v8_str("obj[1] = 42"))->Run();
- Script::Compile(v8_str("obj.y = 42"))->Run();
+ CompileRun("obj[1] = 42");
+ CompileRun("obj.y = 42");
// Indexed and named query.
- Script::Compile(v8_str("0 in obj"))->Run();
- Script::Compile(v8_str("'x' in obj"))->Run();
+ CompileRun("0 in obj");
+ CompileRun("'x' in obj");
// Indexed and named deleter.
- Script::Compile(v8_str("delete obj[0]"))->Run();
- Script::Compile(v8_str("delete obj.x"))->Run();
+ CompileRun("delete obj[0]");
+ CompileRun("delete obj.x");
// Enumerators.
- Script::Compile(v8_str("for (var p in obj) ;"))->Run();
+ CompileRun("for (var p in obj) ;");
}
@@ -2444,13 +2443,12 @@ THREADED_TEST(PrePropertyHandler) {
0,
PrePropertyHandlerQuery);
LocalContext env(NULL, desc->InstanceTemplate());
- Script::Compile(v8_str(
- "var pre = 'Object: pre'; var on = 'Object: on';"))->Run();
- v8::Handle<Value> result_pre = Script::Compile(v8_str("pre"))->Run();
+ CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
+ v8::Handle<Value> result_pre = CompileRun("pre");
CHECK_EQ(v8_str("PrePropertyHandler: pre"), result_pre);
- v8::Handle<Value> result_on = Script::Compile(v8_str("on"))->Run();
+ v8::Handle<Value> result_on = CompileRun("on");
CHECK_EQ(v8_str("Object: on"), result_on);
- v8::Handle<Value> result_post = Script::Compile(v8_str("post"))->Run();
+ v8::Handle<Value> result_post = CompileRun("post");
CHECK(result_post.IsEmpty());
}
@@ -2458,8 +2456,7 @@ THREADED_TEST(PrePropertyHandler) {
THREADED_TEST(UndefinedIsNotEnumerable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> result = Script::Compile(v8_str(
- "this.propertyIsEnumerable(undefined)"))->Run();
+ v8::Handle<Value> result = CompileRun("this.propertyIsEnumerable(undefined)");
CHECK(result->IsFalse());
}
@@ -2512,7 +2509,7 @@ THREADED_TEST(DeepCrossLanguageRecursion) {
call_recursively_script = v8::Handle<Script>();
env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
- Script::Compile(v8_str("callFunctionRecursively()"))->Run();
+ CompileRun("callFunctionRecursively()");
}
@@ -2541,11 +2538,11 @@ THREADED_TEST(CallbackExceptionRegression) {
ThrowingPropertyHandlerSet);
LocalContext env;
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<Value> otto = Script::Compile(v8_str(
- "try { with (obj) { otto; } } catch (e) { e; }"))->Run();
+ v8::Handle<Value> otto = CompileRun(
+ "try { with (obj) { otto; } } catch (e) { e; }");
CHECK_EQ(v8_str("otto"), otto);
- v8::Handle<Value> netto = Script::Compile(v8_str(
- "try { with (obj) { netto = 4; } } catch (e) { e; }"))->Run();
+ v8::Handle<Value> netto = CompileRun(
+ "try { with (obj) { netto = 4; } } catch (e) { e; }");
CHECK_EQ(v8_str("netto"), netto);
}
@@ -2557,7 +2554,7 @@ THREADED_TEST(FunctionPrototype) {
Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
LocalContext env;
env->Global()->Set(v8_str("Foo"), Foo->GetFunction());
- Local<Script> script = Script::Compile(v8_str("Foo.prototype.plak"));
+ Local<Script> script = v8_compile("Foo.prototype.plak");
CHECK_EQ(script->Run()->Int32Value(), 321);
}
@@ -2634,6 +2631,10 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
CheckAlignedPointerInInternalField(obj, huge);
+
+ v8::UniquePersistent<v8::Object> persistent(isolate, obj);
+ CHECK_EQ(1, Object::InternalFieldCount(persistent));
+ CHECK_EQ(huge, Object::GetAlignedPointerFromInternalField(persistent, 0));
}
@@ -2756,7 +2757,8 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
- v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, "my-symbol");
+ v8::Local<v8::Symbol> sym2 =
+ v8::Symbol::New(isolate, v8_str("my-symbol"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -2774,7 +2776,7 @@ THREADED_TEST(SymbolProperties) {
CHECK(!sym1->StrictEquals(sym2));
CHECK(!sym2->StrictEquals(sym1));
- CHECK(sym2->Name()->Equals(v8::String::NewFromUtf8(isolate, "my-symbol")));
+ CHECK(sym2->Name()->Equals(v8_str("my-symbol")));
v8::Local<v8::Value> sym_val = sym2;
CHECK(sym_val->IsSymbol());
@@ -2786,7 +2788,7 @@ THREADED_TEST(SymbolProperties) {
CHECK(sym_obj->IsSymbolObject());
CHECK(!sym2->IsSymbolObject());
CHECK(!obj->IsSymbolObject());
- CHECK(sym_obj->Equals(sym2));
+ CHECK(!sym_obj->Equals(sym2));
CHECK(!sym_obj->StrictEquals(sym2));
CHECK(v8::SymbolObject::Cast(*sym_obj)->Equals(sym_obj));
CHECK(v8::SymbolObject::Cast(*sym_obj)->ValueOf()->Equals(sym2));
@@ -2844,7 +2846,8 @@ THREADED_TEST(PrivateProperties) {
v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Private> priv1 = v8::Private::New(isolate);
- v8::Local<v8::Private> priv2 = v8::Private::New(isolate, "my-private");
+ v8::Local<v8::Private> priv2 =
+ v8::Private::New(isolate, v8_str("my-private"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -2895,6 +2898,55 @@ THREADED_TEST(PrivateProperties) {
}
+THREADED_TEST(GlobalSymbols) {
+ i::FLAG_harmony_symbols = true;
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<String> name = v8_str("my-symbol");
+ v8::Local<v8::Symbol> glob = v8::Symbol::For(isolate, name);
+ v8::Local<v8::Symbol> glob2 = v8::Symbol::For(isolate, name);
+ CHECK(glob2->SameValue(glob));
+
+ v8::Local<v8::Symbol> glob_api = v8::Symbol::ForApi(isolate, name);
+ v8::Local<v8::Symbol> glob_api2 = v8::Symbol::ForApi(isolate, name);
+ CHECK(glob_api2->SameValue(glob_api));
+ CHECK(!glob_api->SameValue(glob));
+
+ v8::Local<v8::Symbol> sym = v8::Symbol::New(isolate, name);
+ CHECK(!sym->SameValue(glob));
+
+ CompileRun("var sym2 = Symbol.for('my-symbol')");
+ v8::Local<Value> sym2 = env->Global()->Get(v8_str("sym2"));
+ CHECK(sym2->SameValue(glob));
+ CHECK(!sym2->SameValue(glob_api));
+}
+
+
+THREADED_TEST(GlobalPrivates) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<String> name = v8_str("my-private");
+ v8::Local<v8::Private> glob = v8::Private::ForApi(isolate, name);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(obj->SetPrivate(glob, v8::Integer::New(isolate, 3)));
+
+ v8::Local<v8::Private> glob2 = v8::Private::ForApi(isolate, name);
+ CHECK(obj->HasPrivate(glob2));
+
+ v8::Local<v8::Private> priv = v8::Private::New(isolate, name);
+ CHECK(!obj->HasPrivate(priv));
+
+ CompileRun("var intern = %CreateGlobalPrivateSymbol('my-private')");
+ v8::Local<Value> intern = env->Global()->Get(v8_str("intern"));
+ CHECK(!obj->Has(intern));
+}
+
+
class ScopedArrayBufferContents {
public:
explicit ScopedArrayBufferContents(
@@ -3274,7 +3326,7 @@ THREADED_TEST(External) {
Local<v8::External> ext = v8::External::New(CcTest::isolate(), &x);
LocalContext env;
env->Global()->Set(v8_str("ext"), ext);
- Local<Value> reext_obj = Script::Compile(v8_str("this.ext"))->Run();
+ Local<Value> reext_obj = CompileRun("this.ext");
v8::Handle<v8::External> reext = reext_obj.As<v8::External>();
int* ptr = static_cast<int*>(reext->Value());
CHECK_EQ(x, 3);
@@ -3443,6 +3495,89 @@ THREADED_TEST(UniquePersistent) {
}
+template<typename K, typename V>
+class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
+ public:
+ typedef typename v8::DefaultPersistentValueMapTraits<K, V>::Impl Impl;
+ static const bool kIsWeak = true;
+ struct WeakCallbackDataType {
+ Impl* impl;
+ K key;
+ };
+ static WeakCallbackDataType* WeakCallbackParameter(
+ Impl* impl, const K& key, Local<V> value) {
+ WeakCallbackDataType* data = new WeakCallbackDataType;
+ data->impl = impl;
+ data->key = key;
+ return data;
+ }
+ static Impl* ImplFromWeakCallbackData(
+ const v8::WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return data.GetParameter()->impl;
+ }
+ static K KeyFromWeakCallbackData(
+ const v8::WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return data.GetParameter()->key;
+ }
+ static void DisposeCallbackData(WeakCallbackDataType* data) {
+ delete data;
+ }
+ static void Dispose(v8::Isolate* isolate, v8::UniquePersistent<V> value,
+ Impl* impl, K key) { }
+};
+
+
+template<typename Map>
+static void TestPersistentValueMap() {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Map map(isolate);
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->global_handles_count();
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ {
+ HandleScope scope(isolate);
+ Local<v8::Object> obj = map.Get(7);
+ CHECK(obj.IsEmpty());
+ Local<v8::Object> expected = v8::Object::New(isolate);
+ map.Set(7, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ obj = map.Get(7);
+ CHECK_EQ(expected, obj);
+ v8::UniquePersistent<v8::Object> removed = map.Remove(7);
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK(expected == removed);
+ removed = map.Remove(7);
+ CHECK(removed.IsEmpty());
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ }
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ if (map.IsWeak()) {
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->heap()->
+ CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ } else {
+ map.Clear();
+ }
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+}
+
+
+TEST(PersistentValueMap) {
+ // Default case, w/o weak callbacks:
+ TestPersistentValueMap<v8::StdPersistentValueMap<int, v8::Object> >();
+
+ // Custom traits with weak callbacks:
+ typedef v8::StdPersistentValueMap<int, v8::Object,
+ WeakStdMapTraits<int, v8::Object> > WeakPersistentValueMap;
+ TestPersistentValueMap<WeakPersistentValueMap>();
+}
+
+
THREADED_TEST(GlobalHandleUpcast) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -3945,7 +4080,7 @@ TEST(ApiObjectGroupsCycleForScavenger) {
THREADED_TEST(ScriptException) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<Script> script = Script::Compile(v8_str("throw 'panama!';"));
+ Local<Script> script = v8_compile("throw 'panama!';");
v8::TryCatch try_catch;
Local<Value> result = script->Run();
CHECK(result.IsEmpty());
@@ -3974,7 +4109,6 @@ static void check_message_0(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK_EQ(5.76, data->NumberValue());
CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
- CHECK_EQ(7.56, message->GetScriptData()->NumberValue());
CHECK(!message->IsSharedCrossOrigin());
message_received = true;
}
@@ -3986,11 +4120,7 @@ THREADED_TEST(MessageHandler0) {
CHECK(!message_received);
LocalContext context;
v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"));
- v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
- &origin);
- script->SetData(v8_str("7.56"));
+ v8::Handle<v8::Script> script = CompileWithOrigin("throw 'error'", "6.75");
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4166,13 +4296,13 @@ THREADED_TEST(GetSetProperty) {
context->Global()->Set(v8_str("12"), v8_num(92));
context->Global()->Set(v8::Integer::New(isolate, 16), v8_num(32));
context->Global()->Set(v8_num(13), v8_num(56));
- Local<Value> foo = Script::Compile(v8_str("this.foo"))->Run();
+ Local<Value> foo = CompileRun("this.foo");
CHECK_EQ(14, foo->Int32Value());
- Local<Value> twelve = Script::Compile(v8_str("this[12]"))->Run();
+ Local<Value> twelve = CompileRun("this[12]");
CHECK_EQ(92, twelve->Int32Value());
- Local<Value> sixteen = Script::Compile(v8_str("this[16]"))->Run();
+ Local<Value> sixteen = CompileRun("this[16]");
CHECK_EQ(32, sixteen->Int32Value());
- Local<Value> thirteen = Script::Compile(v8_str("this[13]"))->Run();
+ Local<Value> thirteen = CompileRun("this[13]");
CHECK_EQ(56, thirteen->Int32Value());
CHECK_EQ(92,
context->Global()->Get(v8::Integer::New(isolate, 12))->Int32Value());
@@ -4201,7 +4331,7 @@ THREADED_TEST(PropertyAttributes) {
context->Global()->Set(prop, v8_num(7), v8::ReadOnly);
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
CHECK_EQ(v8::ReadOnly, context->Global()->GetPropertyAttributes(prop));
- Script::Compile(v8_str("read_only = 9"))->Run();
+ CompileRun("read_only = 9");
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
context->Global()->Set(prop, v8_num(10));
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
@@ -4209,7 +4339,7 @@ THREADED_TEST(PropertyAttributes) {
prop = v8_str("dont_delete");
context->Global()->Set(prop, v8_num(13), v8::DontDelete);
CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
- Script::Compile(v8_str("delete dont_delete"))->Run();
+ CompileRun("delete dont_delete");
CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
CHECK_EQ(v8::DontDelete, context->Global()->GetPropertyAttributes(prop));
// dont-enum
@@ -4248,7 +4378,7 @@ THREADED_TEST(Array) {
CHECK(!array->Has(1));
CHECK(array->Has(2));
CHECK_EQ(7, array->Get(2)->Int32Value());
- Local<Value> obj = Script::Compile(v8_str("[1, 2, 3]"))->Run();
+ Local<Value> obj = CompileRun("[1, 2, 3]");
Local<v8::Array> arr = obj.As<v8::Array>();
CHECK_EQ(3, arr->Length());
CHECK_EQ(1, arr->Get(0)->Int32Value());
@@ -4399,113 +4529,6 @@ THREADED_TEST(FunctionCall) {
}
-static const char* js_code_causing_out_of_memory =
- "var a = new Array(); while(true) a.push(a);";
-
-
-// These tests run for a long time and prevent us from running tests
-// that come after them so they cannot run in parallel.
-TEST(OutOfMemory) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
-
- // Execute a script that causes out of memory.
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::V8::IgnoreOutOfMemoryException();
- Local<Script> script = Script::Compile(String::NewFromUtf8(
- context->GetIsolate(), js_code_causing_out_of_memory));
- Local<Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
-void ProvokeOutOfMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
-
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<Script> script = Script::Compile(String::NewFromUtf8(
- context->GetIsolate(), js_code_causing_out_of_memory));
- Local<Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-
- args.GetReturnValue().Set(result);
-}
-
-
-TEST(OutOfMemoryNested) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(5 * K * K);
- v8::Isolate* isolate = CcTest::isolate();
- v8::SetResourceConstraints(isolate, &constraints);
-
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("ProvokeOutOfMemory"),
- v8::FunctionTemplate::New(isolate, ProvokeOutOfMemory));
- LocalContext context(0, templ);
- v8::V8::IgnoreOutOfMemoryException();
- Local<Value> result = CompileRun(
- "var thrown = false;"
- "try {"
- " ProvokeOutOfMemory();"
- "} catch (e) {"
- " thrown = true;"
- "}");
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
-void OOMCallback(const char* location, const char* message) {
- exit(0);
-}
-
-
-TEST(HugeConsStringOutOfMemory) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
-
- // Execute a script that causes out of memory.
- v8::V8::SetFatalErrorHandler(OOMCallback);
-
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- // Build huge string. This should fail with out of memory exception.
- CompileRun(
- "var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
- "for (var i = 0; i < 22; i++) { str = str + str; }");
-
- CHECK(false); // Should not return.
-}
-
-
THREADED_TEST(ConstructCall) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -4734,7 +4757,7 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
v8::HandleScope scope(args.GetIsolate());
v8::TryCatch try_catch;
- Local<Value> result = v8::Script::Compile(args[0]->ToString())->Run();
+ Local<Value> result = CompileRun(args[0]->ToString());
CHECK(!try_catch.HasCaught() || result.IsEmpty());
args.GetReturnValue().Set(try_catch.HasCaught());
}
@@ -4996,9 +5019,7 @@ THREADED_TEST(ExternalScriptException) {
LocalContext context(0, templ);
v8::TryCatch try_catch;
- Local<Script> script
- = Script::Compile(v8_str("ThrowFromC(); throw 'panama';"));
- Local<Value> result = script->Run();
+ Local<Value> result = CompileRun("ThrowFromC(); throw 'panama';");
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
@@ -5190,12 +5211,12 @@ THREADED_TEST(CatchZero) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("throw 10"))->Run();
+ CompileRun("throw 10");
CHECK(try_catch.HasCaught());
CHECK_EQ(10, try_catch.Exception()->Int32Value());
try_catch.Reset();
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("throw 0"))->Run();
+ CompileRun("throw 0");
CHECK(try_catch.HasCaught());
CHECK_EQ(0, try_catch.Exception()->Int32Value());
}
@@ -5206,7 +5227,7 @@ THREADED_TEST(CatchExceptionFromWith) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("var o = {}; with (o) { throw 42; }"))->Run();
+ CompileRun("var o = {}; with (o) { throw 42; }");
CHECK(try_catch.HasCaught());
}
@@ -5358,7 +5379,7 @@ THREADED_TEST(Equality) {
THREADED_TEST(MultiRun) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<Script> script = Script::Compile(v8_str("x"));
+ Local<Script> script = v8_compile("x");
for (int i = 0; i < 10; i++)
script->Run();
}
@@ -5380,7 +5401,7 @@ THREADED_TEST(SimplePropertyRead) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x"));
+ Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5397,19 +5418,19 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
context->Global()->Set(v8_str("obj"), templ->NewInstance());
// Uses getOwnPropertyDescriptor to check the configurable status
- Local<Script> script_desc
- = Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor( "
- "obj, 'x');"
- "prop.configurable;"));
+ Local<Script> script_desc = v8_compile(
+ "var prop = Object.getOwnPropertyDescriptor( "
+ "obj, 'x');"
+ "prop.configurable;");
Local<Value> result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), true);
// Redefine get - but still configurable
- Local<Script> script_define
- = Script::Compile(v8_str("var desc = { get: function(){return 42; },"
- " configurable: true };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ Local<Script> script_define = v8_compile(
+ "var desc = { get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(42));
@@ -5418,11 +5439,11 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
CHECK_EQ(result->BooleanValue(), true);
// Redefine to a non-configurable
- script_define
- = Script::Compile(v8_str("var desc = { get: function(){return 43; },"
- " configurable: false };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ script_define = v8_compile(
+ "var desc = { get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(43));
result = script_desc->Run();
@@ -5445,18 +5466,19 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script_desc = Script::Compile(v8_str("var prop ="
- "Object.getOwnPropertyDescriptor( "
- "obj, 'x');"
- "prop.configurable;"));
+ Local<Script> script_desc = v8_compile(
+ "var prop ="
+ "Object.getOwnPropertyDescriptor( "
+ "obj, 'x');"
+ "prop.configurable;");
Local<Value> result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), true);
- Local<Script> script_define =
- Script::Compile(v8_str("var desc = {get: function(){return 42; },"
- " configurable: true };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ Local<Script> script_define = v8_compile(
+ "var desc = {get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(42));
@@ -5465,11 +5487,11 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
CHECK_EQ(result->BooleanValue(), true);
- script_define =
- Script::Compile(v8_str("var desc = {get: function(){return 43; },"
- " configurable: false };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ script_define = v8_compile(
+ "var desc = {get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(43));
result = script_desc->Run();
@@ -5668,7 +5690,7 @@ THREADED_TEST(SimplePropertyWrite) {
templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4"));
+ Local<Script> script = v8_compile("obj.x = 4");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
@@ -5685,7 +5707,7 @@ THREADED_TEST(SetterOnly) {
templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
@@ -5705,7 +5727,7 @@ THREADED_TEST(NoAccessors) {
v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
script->Run();
}
@@ -5727,7 +5749,7 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x"));
+ Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5743,7 +5765,7 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
LocalContext context;
// Create an object with a named interceptor.
context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("interceptor_obj.x"));
+ Local<Script> script = v8_compile("interceptor_obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5866,18 +5888,18 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
IndexedPropertySetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> getter_script = Script::Compile(v8_str(
- "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
- Local<Script> setter_script = Script::Compile(v8_str(
+ Local<Script> getter_script = v8_compile(
+ "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];");
+ Local<Script> setter_script = v8_compile(
"obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
"obj[17] = 23;"
- "obj.foo;"));
- Local<Script> interceptor_setter_script = Script::Compile(v8_str(
+ "obj.foo;");
+ Local<Script> interceptor_setter_script = v8_compile(
"obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
"obj[39] = 47;"
- "obj.foo;")); // This setter should not run, due to the interceptor.
- Local<Script> interceptor_getter_script = Script::Compile(v8_str(
- "obj[37];"));
+ "obj.foo;"); // This setter should not run, due to the interceptor.
+ Local<Script> interceptor_getter_script = v8_compile(
+ "obj[37];");
Local<Value> result = getter_script->Run();
CHECK_EQ(v8_num(5), result);
result = setter_script->Run();
@@ -5913,10 +5935,10 @@ static void UnboxedDoubleIndexedPropertySetter(
void UnboxedDoubleIndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
// Force the list of returned keys to be stored in a FastDoubleArray.
- Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+ Local<Script> indexed_property_names_script = v8_compile(
"keys = new Array(); keys[125000] = 1;"
"for(i = 0; i < 80000; i++) { keys[i] = i; };"
- "keys.length = 25; keys;"));
+ "keys.length = 25; keys;");
Local<Value> result = indexed_property_names_script->Run();
info.GetReturnValue().Set(Local<v8::Array>::Cast(result));
}
@@ -5936,29 +5958,28 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
// When obj is created, force it to be Stored in a FastDoubleArray.
- Local<Script> create_unboxed_double_script = Script::Compile(v8_str(
+ Local<Script> create_unboxed_double_script = v8_compile(
"obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
"key_count = 0; "
"for (x in obj) {key_count++;};"
- "obj;"));
+ "obj;");
Local<Value> result = create_unboxed_double_script->Run();
CHECK(result->ToObject()->HasRealIndexedProperty(2000));
- Local<Script> key_count_check = Script::Compile(v8_str(
- "key_count;"));
+ Local<Script> key_count_check = v8_compile("key_count;");
result = key_count_check->Run();
CHECK_EQ(v8_num(40013), result);
}
-void NonStrictArgsIndexedPropertyEnumerator(
+void SloppyArgsIndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
// Force the list of returned keys to be stored in a Arguments object.
- Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+ Local<Script> indexed_property_names_script = v8_compile(
"function f(w,x) {"
" return arguments;"
"}"
"keys = f(0, 1, 2, 3);"
- "keys;"));
+ "keys;");
Local<Object> result =
Local<Object>::Cast(indexed_property_names_script->Run());
// Have to populate the handle manually, as it's not Cast-able.
@@ -5969,7 +5990,7 @@ void NonStrictArgsIndexedPropertyEnumerator(
}
-static void NonStrictIndexedPropertyGetter(
+static void SloppyIndexedPropertyGetter(
uint32_t index,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
@@ -5981,21 +6002,20 @@ static void NonStrictIndexedPropertyGetter(
// Make sure that the the interceptor code in the runtime properly handles
// merging property name lists for non-string arguments arrays.
-THREADED_TEST(IndexedInterceptorNonStrictArgsWithIndexedAccessor) {
+THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetIndexedPropertyHandler(NonStrictIndexedPropertyGetter,
+ templ->SetIndexedPropertyHandler(SloppyIndexedPropertyGetter,
0,
0,
0,
- NonStrictArgsIndexedPropertyEnumerator);
+ SloppyArgsIndexedPropertyEnumerator);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> create_args_script =
- Script::Compile(v8_str(
- "var key_count = 0;"
- "for (x in obj) {key_count++;} key_count;"));
+ Local<Script> create_args_script = v8_compile(
+ "var key_count = 0;"
+ "for (x in obj) {key_count++;} key_count;");
Local<Value> result = create_args_script->Run();
CHECK_EQ(v8_num(4), result);
}
@@ -6370,11 +6390,11 @@ THREADED_TEST(Regress892105) {
"8901");
LocalContext env0;
- Local<Script> script0 = Script::Compile(source);
+ Local<Script> script0 = v8_compile(source);
CHECK_EQ(8901.0, script0->Run()->NumberValue());
LocalContext env1;
- Local<Script> script1 = Script::Compile(source);
+ Local<Script> script1 = v8_compile(source);
CHECK_EQ(8901.0, script1->Run()->NumberValue());
}
@@ -6481,19 +6501,19 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<String> source = v8_str("undetectable.x = 42;"
"undetectable.x");
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
CHECK_EQ(v8::Integer::New(isolate, 42), script->Run());
ExpectBoolean("Object.isExtensible(undetectable)", true);
source = v8_str("Object.preventExtensions(undetectable);");
- script = Script::Compile(source);
+ script = v8_compile(source);
script->Run();
ExpectBoolean("Object.isExtensible(undetectable)", false);
source = v8_str("undetectable.y = 2000;");
- script = Script::Compile(source);
+ script = v8_compile(source);
script->Run();
ExpectBoolean("undetectable.y == undefined", true);
}
@@ -6586,7 +6606,7 @@ TEST(PersistentHandles) {
Local<String> str = v8_str("foo");
v8::Persistent<String> p_str(isolate, str);
p_str.Reset();
- Local<Script> scr = Script::Compile(v8_str(""));
+ Local<Script> scr = v8_compile("");
v8::Persistent<Script> p_scr(isolate, scr);
p_scr.Reset();
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
@@ -6609,7 +6629,7 @@ THREADED_TEST(GlobalObjectTemplate) {
v8::FunctionTemplate::New(isolate, HandleLogDelegator));
v8::Local<Context> context = Context::New(isolate, 0, global_template);
Context::Scope context_scope(context);
- Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
+ CompileRun("JSNI_Log('LOG')");
}
@@ -6627,7 +6647,7 @@ TEST(SimpleExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
+ v8::Handle<Value> result = CompileRun("Foo()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6640,7 +6660,7 @@ TEST(NullExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
+ v8::Handle<Value> result = CompileRun("1+3");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6678,7 +6698,7 @@ TEST(ExtensionWithSourceLength) {
Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
+ v8::Handle<Value> result = CompileRun("Ret54321()");
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 54321), result);
} else {
// Anything but exactly the right length should fail to compile.
@@ -6714,9 +6734,9 @@ TEST(UseEvalFromExtension) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
+ v8::Handle<Value> result = CompileRun("UseEval1()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
- result = Script::Compile(v8_str("UseEval2()"))->Run();
+ result = CompileRun("UseEval2()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
@@ -6748,9 +6768,9 @@ TEST(UseWithFromExtension) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
+ v8::Handle<Value> result = CompileRun("UseWith1()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
- result = Script::Compile(v8_str("UseWith2()"))->Run();
+ result = CompileRun("UseWith2()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
}
@@ -6763,7 +6783,7 @@ TEST(AutoExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate());
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
+ v8::Handle<Value> result = CompileRun("Foo()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6823,7 +6843,7 @@ TEST(NativeCallInExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
+ v8::Handle<Value> result = CompileRun(kNativeCallTest);
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 3));
}
@@ -6860,7 +6880,7 @@ TEST(NativeFunctionDeclaration) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
+ v8::Handle<Value> result = CompileRun("foo(42);");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
@@ -6991,11 +7011,11 @@ THREADED_TEST(FunctionLookup) {
LocalContext context(&config);
CHECK_EQ(3, lookup_count);
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- Script::Compile(v8_str("Foo(0)"))->Run());
+ CompileRun("Foo(0)"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- Script::Compile(v8_str("Foo(1)"))->Run());
+ CompileRun("Foo(1)"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- Script::Compile(v8_str("Foo(2)"))->Run());
+ CompileRun("Foo(2)"));
}
@@ -7009,11 +7029,11 @@ THREADED_TEST(NativeFunctionConstructCall) {
// Run a few times to ensure that allocation of objects doesn't
// change behavior of a constructor function.
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- Script::Compile(v8_str("(new A()).data"))->Run());
+ CompileRun("(new A()).data"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- Script::Compile(v8_str("(new B()).data"))->Run());
+ CompileRun("(new B()).data"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- Script::Compile(v8_str("(new C()).data"))->Run());
+ CompileRun("(new C()).data"));
}
}
@@ -7059,7 +7079,7 @@ THREADED_TEST(ErrorWithMissingScriptInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::V8::AddMessageListener(MissingScriptInfoMessageListener);
- Script::Compile(v8_str("throw Error()"))->Run();
+ CompileRun("throw Error()");
v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener);
}
@@ -7099,14 +7119,14 @@ THREADED_TEST(IndependentWeakHandle) {
object_a.handle.MarkIndependent();
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK(object_a.flag);
CHECK(object_b.flag);
}
static void InvokeScavenge() {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
@@ -7188,7 +7208,7 @@ THREADED_TEST(IndependentHandleRevival) {
object.flag = false;
object.handle.SetWeak(&object, &RevivingCallback);
object.handle.MarkIndependent();
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK(object.flag);
CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
@@ -7480,7 +7500,7 @@ THREADED_TEST(ObjectInstantiation) {
CHECK_NE(obj, context->Global()->Get(v8_str("o")));
context->Global()->Set(v8_str("o2"), obj);
v8::Handle<Value> value =
- Script::Compile(v8_str("o.__proto__ === o2.__proto__"))->Run();
+ CompileRun("o.__proto__ === o2.__proto__");
CHECK_EQ(v8::True(isolate), value);
context->Global()->Set(v8_str("o"), obj);
}
@@ -8241,13 +8261,14 @@ TEST(ApiUncaughtException) {
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
- Script::Compile(v8_str("function trouble_callee() {"
- " var x = null;"
- " return x.foo;"
- "};"
- "function trouble_caller() {"
- " trouble();"
- "};"))->Run();
+ CompileRun(
+ "function trouble_callee() {"
+ " var x = null;"
+ " return x.foo;"
+ "};"
+ "function trouble_caller() {"
+ " trouble();"
+ "};");
Local<Value> trouble = global->Get(v8_str("trouble"));
CHECK(trouble->IsFunction());
Local<Value> trouble_callee = global->Get(v8_str("trouble_callee"));
@@ -8283,13 +8304,12 @@ TEST(ExceptionInNativeScript) {
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
- Script::Compile(
- v8_str(
- "function trouble() {\n"
- " var o = {};\n"
- " new o.foo();\n"
- "};"),
- v8::String::NewFromUtf8(isolate, script_resource_name))->Run();
+ CompileRunWithOrigin(
+ "function trouble() {\n"
+ " var o = {};\n"
+ " new o.foo();\n"
+ "};",
+ script_resource_name);
Local<Value> trouble = global->Get(v8_str("trouble"));
CHECK(trouble->IsFunction());
Function::Cast(*trouble)->Call(global, 0, NULL);
@@ -8301,7 +8321,7 @@ TEST(CompilationErrorUsingTryCatchHandler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::TryCatch try_catch;
- Script::Compile(v8_str("This doesn't &*&@#$&*^ compile."));
+ v8_compile("This doesn't &*&@#$&*^ compile.");
CHECK_NE(NULL, *try_catch.Exception());
CHECK(try_catch.HasCaught());
}
@@ -8311,18 +8331,20 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::TryCatch try_catch;
- Script::Compile(v8_str("try { throw ''; } catch (e) {}"))->Run();
+ CompileRun("try { throw ''; } catch (e) {}");
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("try { throw ''; } finally {}"))->Run();
+ CompileRun("try { throw ''; } finally {}");
CHECK(try_catch.HasCaught());
try_catch.Reset();
- Script::Compile(v8_str("(function() {"
- "try { throw ''; } finally { return; }"
- "})()"))->Run();
+ CompileRun(
+ "(function() {"
+ "try { throw ''; } finally { return; }"
+ "})()");
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("(function()"
- " { try { throw ''; } finally { throw 0; }"
- "})()"))->Run();
+ CompileRun(
+ "(function()"
+ " { try { throw ''; } finally { throw 0; }"
+ "})()");
CHECK(try_catch.HasCaught());
}
@@ -8400,12 +8422,12 @@ THREADED_TEST(SecurityChecks) {
env1->SetSecurityToken(foo);
// Create a function in env1.
- Script::Compile(v8_str("spy=function(){return spy;}"))->Run();
+ CompileRun("spy=function(){return spy;}");
Local<Value> spy = env1->Global()->Get(v8_str("spy"));
CHECK(spy->IsFunction());
// Create another function accessing global objects.
- Script::Compile(v8_str("spy2=function(){return new this.Array();}"))->Run();
+ CompileRun("spy2=function(){return new this.Array();}");
Local<Value> spy2 = env1->Global()->Get(v8_str("spy2"));
CHECK(spy2->IsFunction());
@@ -8518,7 +8540,7 @@ THREADED_TEST(CrossDomainDelete) {
{
Context::Scope scope_env2(env2);
Local<Value> result =
- Script::Compile(v8_str("delete env1.prop"))->Run();
+ CompileRun("delete env1.prop");
CHECK(result->IsFalse());
}
@@ -8548,7 +8570,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
Local<String> test = v8_str("propertyIsEnumerable.call(env1, 'prop')");
{
Context::Scope scope_env2(env2);
- Local<Value> result = Script::Compile(test)->Run();
+ Local<Value> result = CompileRun(test);
CHECK(result->IsTrue());
}
@@ -8556,7 +8578,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
env2->SetSecurityToken(bar);
{
Context::Scope scope_env2(env2);
- Local<Value> result = Script::Compile(test)->Run();
+ Local<Value> result = CompileRun(test);
CHECK(result->IsFalse());
}
}
@@ -9768,10 +9790,10 @@ THREADED_TEST(InstanceProperties) {
Local<Value> o = t->GetFunction()->NewInstance();
context->Global()->Set(v8_str("i"), o);
- Local<Value> value = Script::Compile(v8_str("i.x"))->Run();
+ Local<Value> value = CompileRun("i.x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("i.f()"))->Run();
+ value = CompileRun("i.f()");
CHECK_EQ(12, value->Int32Value());
}
@@ -9820,22 +9842,22 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
// environment initialization.
global_object = env->Global();
- Local<Value> value = Script::Compile(v8_str("x"))->Run();
+ Local<Value> value = CompileRun("x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str(script))->Run();
+ value = CompileRun(script);
CHECK_EQ(1, value->Int32Value());
}
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
- Local<Value> value = Script::Compile(v8_str("x"))->Run();
+ Local<Value> value = CompileRun("x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str(script))->Run();
+ value = CompileRun(script);
CHECK_EQ(1, value->Int32Value());
}
}
@@ -9870,14 +9892,14 @@ THREADED_TEST(CallKnownGlobalReceiver) {
// Hold on to the global object so it can be used again in another
// environment initialization.
global_object = env->Global();
- foo = Script::Compile(v8_str(script))->Run();
+ foo = CompileRun(script);
}
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
env->Global()->Set(v8_str("foo"), foo);
- Script::Compile(v8_str("foo()"))->Run();
+ CompileRun("foo()");
}
}
@@ -9946,19 +9968,19 @@ THREADED_TEST(ShadowObject) {
context->Global()->Set(v8_str("__proto__"), o);
Local<Value> value =
- Script::Compile(v8_str("this.propertyIsEnumerable(0)"))->Run();
+ CompileRun("this.propertyIsEnumerable(0)");
CHECK(value->IsBoolean());
CHECK(!value->BooleanValue());
- value = Script::Compile(v8_str("x"))->Run();
+ value = CompileRun("x");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(42, value->Int32Value());
- Script::Compile(v8_str("y = 43"))->Run();
+ CompileRun("y = 43");
CHECK_EQ(1, shadow_y_setter_call_count);
- value = Script::Compile(v8_str("y"))->Run();
+ value = CompileRun("y");
CHECK_EQ(1, shadow_y_getter_call_count);
CHECK_EQ(42, value->Int32Value());
}
@@ -10217,10 +10239,11 @@ THREADED_TEST(Regress269562) {
Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
CHECK(o2->SetPrototype(o1));
- v8::Local<v8::Symbol> sym = v8::Symbol::New(context->GetIsolate(), "s1");
+ v8::Local<v8::Symbol> sym =
+ v8::Symbol::New(context->GetIsolate(), v8_str("s1"));
o1->Set(sym, v8_num(3));
- o1->SetHiddenValue(v8_str("h1"),
- v8::Integer::New(context->GetIsolate(), 2013));
+ o1->SetHiddenValue(
+ v8_str("h1"), v8::Integer::New(context->GetIsolate(), 2013));
// Call the runtime version of GetLocalPropertyNames() on
// the natively created object through JavaScript.
@@ -10582,29 +10605,29 @@ THREADED_TEST(EvalAliasedDynamic) {
v8::HandleScope scope(current->GetIsolate());
// Tests where aliased eval can only be resolved dynamically.
- Local<Script> script =
- Script::Compile(v8_str("function f(x) { "
- " var foo = 2;"
- " with (x) { return eval('foo'); }"
- "}"
- "foo = 0;"
- "result1 = f(new Object());"
- "result2 = f(this);"
- "var x = new Object();"
- "x.eval = function(x) { return 1; };"
- "result3 = f(x);"));
+ Local<Script> script = v8_compile(
+ "function f(x) { "
+ " var foo = 2;"
+ " with (x) { return eval('foo'); }"
+ "}"
+ "foo = 0;"
+ "result1 = f(new Object());"
+ "result2 = f(this);"
+ "var x = new Object();"
+ "x.eval = function(x) { return 1; };"
+ "result3 = f(x);");
script->Run();
CHECK_EQ(2, current->Global()->Get(v8_str("result1"))->Int32Value());
CHECK_EQ(0, current->Global()->Get(v8_str("result2"))->Int32Value());
CHECK_EQ(1, current->Global()->Get(v8_str("result3"))->Int32Value());
v8::TryCatch try_catch;
- script =
- Script::Compile(v8_str("function f(x) { "
- " var bar = 2;"
- " with (x) { return eval('bar'); }"
- "}"
- "result4 = f(this)"));
+ script = v8_compile(
+ "function f(x) { "
+ " var bar = 2;"
+ " with (x) { return eval('bar'); }"
+ "}"
+ "result4 = f(this)");
script->Run();
CHECK(!try_catch.HasCaught());
CHECK_EQ(2, current->Global()->Get(v8_str("result4"))->Int32Value());
@@ -10626,8 +10649,7 @@ THREADED_TEST(CrossEval) {
current->Global()->Set(v8_str("other"), other->Global());
// Check that new variables are introduced in other context.
- Local<Script> script =
- Script::Compile(v8_str("other.eval('var foo = 1234')"));
+ Local<Script> script = v8_compile("other.eval('var foo = 1234')");
script->Run();
Local<Value> foo = other->Global()->Get(v8_str("foo"));
CHECK_EQ(1234, foo->Int32Value());
@@ -10635,8 +10657,7 @@ THREADED_TEST(CrossEval) {
// Check that writing to non-existing properties introduces them in
// the other context.
- script =
- Script::Compile(v8_str("other.eval('na = 1234')"));
+ script = v8_compile("other.eval('na = 1234')");
script->Run();
CHECK_EQ(1234, other->Global()->Get(v8_str("na"))->Int32Value());
CHECK(!current->Global()->Has(v8_str("na")));
@@ -10644,19 +10665,18 @@ THREADED_TEST(CrossEval) {
// Check that global variables in current context are not visible in other
// context.
v8::TryCatch try_catch;
- script =
- Script::Compile(v8_str("var bar = 42; other.eval('bar');"));
+ script = v8_compile("var bar = 42; other.eval('bar');");
Local<Value> result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that local variables in current context are not visible in other
// context.
- script =
- Script::Compile(v8_str("(function() { "
- " var baz = 87;"
- " return other.eval('baz');"
- "})();"));
+ script = v8_compile(
+ "(function() { "
+ " var baz = 87;"
+ " return other.eval('baz');"
+ "})();");
result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -10664,30 +10684,28 @@ THREADED_TEST(CrossEval) {
// Check that global variables in the other environment are visible
// when evaluting code.
other->Global()->Set(v8_str("bis"), v8_num(1234));
- script = Script::Compile(v8_str("other.eval('bis')"));
+ script = v8_compile("other.eval('bis')");
CHECK_EQ(1234, script->Run()->Int32Value());
CHECK(!try_catch.HasCaught());
// Check that the 'this' pointer points to the global object evaluating
// code.
other->Global()->Set(v8_str("t"), other->Global());
- script = Script::Compile(v8_str("other.eval('this == t')"));
+ script = v8_compile("other.eval('this == t')");
result = script->Run();
CHECK(result->IsTrue());
CHECK(!try_catch.HasCaught());
// Check that variables introduced in with-statement are not visible in
// other context.
- script =
- Script::Compile(v8_str("with({x:2}){other.eval('x')}"));
+ script = v8_compile("with({x:2}){other.eval('x')}");
result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that you cannot use 'eval.call' with another object than the
// current global object.
- script =
- Script::Compile(v8_str("other.y = 1; eval.call(other, 'y')"));
+ script = v8_compile("other.y = 1; eval.call(other, 'y')");
result = script->Run();
CHECK(try_catch.HasCaught());
}
@@ -10742,8 +10760,7 @@ THREADED_TEST(CrossLazyLoad) {
current->Global()->Set(v8_str("other"), other->Global());
// Trigger lazy loading in other context.
- Local<Script> script =
- Script::Compile(v8_str("other.eval('new Date(42)')"));
+ Local<Script> script = v8_compile("other.eval('new Date(42)')");
Local<Value> value = script->Run();
CHECK_EQ(42.0, value->NumberValue());
}
@@ -12929,7 +12946,6 @@ static void ChildGetter(Local<String> name,
THREADED_TEST(Overriding) {
- i::FLAG_es5_readonly = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -14123,7 +14139,8 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// have remnants of state from other code.
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
- i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Heap* heap = i_isolate->heap();
{
v8::HandleScope scope(isolate);
@@ -14143,7 +14160,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
const int kIterations = 10;
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
- i::AlwaysAllocateScope always_allocate;
+ i::AlwaysAllocateScope always_allocate(i_isolate);
SimulateFullSpace(heap->code_space());
CompileRun(script);
@@ -14248,14 +14265,12 @@ TEST(CatchStackOverflow) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> result = CompileRun(
"function f() {"
" return f();"
"}"
""
- "f();"));
- v8::Handle<v8::Value> result = script->Run();
+ "f();");
CHECK(result.IsEmpty());
}
@@ -14285,8 +14300,7 @@ static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
THREADED_TEST(TryCatchSourceInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Local<v8::String> source = v8_str(
"function Foo() {\n"
" return Bar();\n"
"}\n"
@@ -14304,8 +14318,7 @@ THREADED_TEST(TryCatchSourceInfo) {
const char* resource_name;
v8::Handle<v8::Script> script;
resource_name = "test.js";
- script = v8::Script::Compile(
- source, v8::String::NewFromUtf8(context->GetIsolate(), resource_name));
+ script = CompileWithOrigin(source, resource_name);
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test1.js";
@@ -14330,10 +14343,8 @@ THREADED_TEST(CompilationCache) {
v8::String::NewFromUtf8(context->GetIsolate(), "1234");
v8::Handle<v8::String> source1 =
v8::String::NewFromUtf8(context->GetIsolate(), "1234");
- v8::Handle<v8::Script> script0 = v8::Script::Compile(
- source0, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
- v8::Handle<v8::Script> script1 = v8::Script::Compile(
- source1, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
+ v8::Handle<v8::Script> script0 = CompileWithOrigin(source0, "test.js");
+ v8::Handle<v8::Script> script1 = CompileWithOrigin(source1, "test.js");
v8::Handle<v8::Script> script2 =
v8::Script::Compile(source0); // different origin
CHECK_EQ(1234, script0->Run()->Int32Value());
@@ -14406,8 +14417,7 @@ THREADED_TEST(PropertyEnumeration) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -14415,7 +14425,7 @@ THREADED_TEST(PropertyEnumeration) {
"var proto = {x: 1, y: 2, z: 3};"
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
- "result;"))->Run();
+ "result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
@@ -14451,8 +14461,7 @@ THREADED_TEST(PropertyEnumeration2) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -14460,7 +14469,7 @@ THREADED_TEST(PropertyEnumeration2) {
"var proto = {x: 1, y: 2, z: 3};"
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
- "result;"))->Run();
+ "result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
@@ -14866,8 +14875,13 @@ TEST(PreCompileInvalidPreparseDataError) {
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
v8::TryCatch try_catch;
- Local<String> source = String::NewFromUtf8(isolate, script);
- Local<Script> compiled_script = Script::New(source, NULL, sd);
+ v8::ScriptCompiler::Source script_source(
+ String::NewFromUtf8(isolate, script),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
+ Local<v8::UnboundScript> compiled_script =
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
+
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Message()->Get());
CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
@@ -14884,7 +14898,12 @@ TEST(PreCompileInvalidPreparseDataError) {
sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
- compiled_script = Script::New(source, NULL, sd);
+ v8::ScriptCompiler::Source script_source2(
+ String::NewFromUtf8(isolate, script),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
+ compiled_script =
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source2);
CHECK(!try_catch.HasCaught());
delete sd;
@@ -15200,7 +15219,6 @@ TEST(RegExpInterruption) {
// Test that we cannot set a property on the global object if there
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
- i::FLAG_es5_readonly = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
@@ -15555,7 +15573,6 @@ THREADED_TEST(GetCallingContext) {
// Check that a variable declaration with no explicit initialization
// value does shadow an existing property in the prototype chain.
THREADED_TEST(InitGlobalVarInProtoChain) {
- i::FLAG_es52_globals = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
// Introduce a variable in the prototype chain.
@@ -15592,7 +15609,7 @@ static void CheckElementValue(i::Isolate* isolate,
int expected,
i::Handle<i::Object> obj,
int offset) {
- i::Object* element = obj->GetElement(isolate, offset)->ToObjectChecked();
+ i::Object* element = *i::Object::GetElement(isolate, obj, offset);
CHECK_EQ(expected, i::Smi::cast(element)->value());
}
@@ -15679,20 +15696,17 @@ THREADED_TEST(PixelArray) {
i::Handle<i::Smi> value(i::Smi::FromInt(2),
reinterpret_cast<i::Isolate*>(context->GetIsolate()));
i::Handle<i::Object> no_failure;
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 2, jsobj, 1);
*value.location() = i::Smi::FromInt(256);
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 255, jsobj, 1);
*value.location() = i::Smi::FromInt(-1);
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 0, jsobj, 1);
@@ -16229,7 +16243,7 @@ static void ObjectWithExternalArrayTestHelper(
array_type == v8::kExternalFloat32Array) {
CHECK_EQ(static_cast<int>(i::OS::nan_value()),
static_cast<int>(
- jsobj->GetElement(isolate, 7)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 7)->Number()));
} else {
CheckElementValue(isolate, 0, jsobj, 7);
}
@@ -16241,7 +16255,7 @@ static void ObjectWithExternalArrayTestHelper(
CHECK_EQ(2, result->Int32Value());
CHECK_EQ(2,
static_cast<int>(
- jsobj->GetElement(isolate, 6)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 6)->Number()));
if (array_type != v8::kExternalFloat32Array &&
array_type != v8::kExternalFloat64Array) {
@@ -16410,7 +16424,7 @@ static void FixedTypedArrayTestHelper(
v8::Handle<v8::Object> obj = v8::Object::New(CcTest::isolate());
i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
i::Handle<i::Map> fixed_array_map =
- isolate->factory()->GetElementsTransitionMap(jsobj, elements_kind);
+ i::JSObject::GetElementsTransitionMap(jsobj, elements_kind);
jsobj->set_map(*fixed_array_map);
jsobj->set_elements(*fixed_array);
@@ -16521,7 +16535,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
kElementCount);
CHECK_EQ(1,
static_cast<int>(
- jsobj->GetElement(isolate, 1)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 1)->Number()));
ObjectWithExternalArrayTestHelper<ExternalArrayClass, ElementType>(
context.local(), obj, kElementCount, array_type, low, high);
@@ -16998,19 +17012,20 @@ THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
const char *source = "foo";
- v8::Handle<v8::Script> dep =
- v8::Script::Compile(v8::String::NewFromUtf8(c1->GetIsolate(), source));
- v8::Handle<v8::Script> indep =
- v8::Script::New(v8::String::NewFromUtf8(c1->GetIsolate(), source));
+ v8::Handle<v8::Script> dep = v8_compile(source);
+ v8::ScriptCompiler::Source script_source(v8::String::NewFromUtf8(
+ c1->GetIsolate(), source));
+ v8::Handle<v8::UnboundScript> indep =
+ v8::ScriptCompiler::CompileUnbound(c1->GetIsolate(), &script_source);
c1->Global()->Set(v8::String::NewFromUtf8(c1->GetIsolate(), "foo"),
v8::Integer::New(c1->GetIsolate(), 100));
CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->Run()->Int32Value(), 100);
+ CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 100);
LocalContext c2;
c2->Global()->Set(v8::String::NewFromUtf8(c2->GetIsolate(), "foo"),
v8::Integer::New(c2->GetIsolate(), 101));
CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->Run()->Int32Value(), 101);
+ CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 101);
}
@@ -17023,7 +17038,10 @@ THREADED_TEST(StackTrace) {
v8::String::NewFromUtf8(context->GetIsolate(), source);
v8::Handle<v8::String> origin =
v8::String::NewFromUtf8(context->GetIsolate(), "stack-trace-test");
- v8::Script::New(src, origin)->Run();
+ v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
+ v8::ScriptCompiler::CompileUnbound(context->GetIsolate(), &script_source)
+ ->BindToCurrentContext()
+ ->Run();
CHECK(try_catch.HasCaught());
v8::String::Utf8Value stack(try_catch.StackTrace());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
@@ -17129,8 +17147,12 @@ TEST(CaptureStackTrace) {
"var x;eval('new foo();');";
v8::Handle<v8::String> overview_src =
v8::String::NewFromUtf8(isolate, overview_source);
+ v8::ScriptCompiler::Source script_source(overview_src,
+ v8::ScriptOrigin(origin));
v8::Handle<Value> overview_result(
- v8::Script::New(overview_src, origin)->Run());
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source)
+ ->BindToCurrentContext()
+ ->Run());
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
@@ -17149,9 +17171,11 @@ TEST(CaptureStackTrace) {
v8::Handle<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
v8::Handle<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
- v8::Handle<v8::Script> detailed_script(
- v8::Script::New(detailed_src, &detailed_origin));
- v8::Handle<Value> detailed_result(detailed_script->Run());
+ v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
+ v8::Handle<v8::UnboundScript> detailed_script(
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source2));
+ v8::Handle<Value> detailed_result(
+ detailed_script->BindToCurrentContext()->Run());
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
}
@@ -17176,13 +17200,14 @@ TEST(CaptureStackTraceForUncaughtException) {
v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
- Script::Compile(v8_str("function foo() {\n"
- " throw 1;\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};"),
- v8_str("origin"))->Run();
+ CompileRunWithOrigin(
+ "function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};",
+ "origin");
v8::Local<v8::Object> global = env->Global();
Local<Value> trouble = global->Get(v8_str("bar"));
CHECK(trouble->IsFunction());
@@ -17417,9 +17442,7 @@ TEST(ScriptIdInStackTrace) {
" AnalyzeScriptIdInStack();"
"}\n"
"foo();\n");
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"));
- v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
+ v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test");
script->Run();
for (int i = 0; i < 2; i++) {
CHECK(scriptIdInStack[i] != v8::Message::kNoScriptIdInfo);
@@ -17520,10 +17543,33 @@ TEST(DynamicWithSourceURLInStackTrace) {
}
+TEST(DynamicWithSourceURLInStackTraceString) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ const char *source =
+ "function outer() {\n"
+ " function foo() {\n"
+ " FAIL.FAIL;\n"
+ " }\n"
+ " foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=source_url");
+ v8::TryCatch try_catch;
+ CompileRunWithOrigin(code.start(), "", 0, 0);
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(try_catch.StackTrace());
+ CHECK(strstr(*stack, "at foo (source_url:3:5)") != NULL);
+}
+
+
static void CreateGarbageInOldSpace() {
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- i::AlwaysAllocateScope always_allocate;
+ i::AlwaysAllocateScope always_allocate(CcTest::i_isolate());
for (int i = 0; i < 1000; i++) {
factory->NewFixedArray(1000, i::TENURED);
}
@@ -17627,7 +17673,7 @@ TEST(Regress2107) {
TEST(Regress2333) {
LocalContext env;
for (int i = 0; i < 3; i++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
}
@@ -18421,14 +18467,14 @@ TEST(SetterOnConstructorPrototype) {
"C2.prototype.__proto__ = P;");
v8::Local<v8::Script> script;
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
}
- script = v8::Script::Compile(v8_str("new C2();"));
+script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c2->Get(v8_str("x"))->Int32Value());
@@ -18473,14 +18519,14 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
"C2.prototype.__proto__ = P;");
v8::Local<v8::Script> script;
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
}
- script = v8::Script::Compile(v8_str("new C2();"));
+ script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(23, c2->Get(v8_str("x"))->Int32Value());
@@ -18508,7 +18554,7 @@ TEST(Regress618) {
// This compile will add the code to the compilation cache.
CompileRun(source);
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
// Allow enough iterations for the inobject slack tracking logic
// to finalize instance size and install the fast construct stub.
for (int i = 0; i < 256; i++) {
@@ -18527,7 +18573,7 @@ TEST(Regress618) {
// This compile will get the code from the compilation cache.
CompileRun(source);
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
@@ -18540,6 +18586,8 @@ int prologue_call_count = 0;
int epilogue_call_count = 0;
int prologue_call_count_second = 0;
int epilogue_call_count_second = 0;
+int prologue_call_count_alloc = 0;
+int epilogue_call_count_alloc = 0;
void PrologueCallback(v8::GCType, v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
@@ -18601,6 +18649,46 @@ void EpilogueCallbackSecond(v8::Isolate* isolate,
}
+void PrologueCallbackAlloc(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ v8::HandleScope scope(isolate);
+
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++prologue_call_count_alloc;
+
+ // Simulate full heap to see if we will reenter this callback
+ SimulateFullSpace(CcTest::heap()->new_space());
+
+ Local<Object> obj = Object::New(isolate);
+ CHECK(!obj.IsEmpty());
+
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
+void EpilogueCallbackAlloc(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ v8::HandleScope scope(isolate);
+
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++epilogue_call_count_alloc;
+
+ // Simulate full heap to see if we will reenter this callback
+ SimulateFullSpace(CcTest::heap()->new_space());
+
+ Local<Object> obj = Object::New(isolate);
+ CHECK(!obj.IsEmpty());
+
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
TEST(GCCallbacksOld) {
LocalContext context;
@@ -18667,6 +18755,17 @@ TEST(GCCallbacks) {
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
+
+ CHECK_EQ(0, prologue_call_count_alloc);
+ CHECK_EQ(0, epilogue_call_count_alloc);
+ isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
+ isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(1, prologue_call_count_alloc);
+ CHECK_EQ(1, epilogue_call_count_alloc);
+ isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
+ isolate->RemoveGCEpilogueCallback(EpilogueCallbackAlloc);
}
@@ -19284,7 +19383,6 @@ TEST(IsolateDifferentContexts) {
class InitDefaultIsolateThread : public v8::internal::Thread {
public:
enum TestCase {
- IgnoreOOM,
SetResourceConstraints,
SetFatalHandler,
SetCounterFunction,
@@ -19301,34 +19399,30 @@ class InitDefaultIsolateThread : public v8::internal::Thread {
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
switch (testCase_) {
- case IgnoreOOM:
- v8::V8::IgnoreOutOfMemoryException();
- break;
-
- case SetResourceConstraints: {
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
- break;
- }
+ case SetResourceConstraints: {
+ static const int K = 1024;
+ v8::ResourceConstraints constraints;
+ constraints.set_max_young_space_size(256 * K);
+ constraints.set_max_old_space_size(4 * K * K);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
+ break;
+ }
- case SetFatalHandler:
- v8::V8::SetFatalErrorHandler(NULL);
- break;
+ case SetFatalHandler:
+ v8::V8::SetFatalErrorHandler(NULL);
+ break;
- case SetCounterFunction:
- v8::V8::SetCounterFunction(NULL);
- break;
+ case SetCounterFunction:
+ v8::V8::SetCounterFunction(NULL);
+ break;
- case SetCreateHistogramFunction:
- v8::V8::SetCreateHistogramFunction(NULL);
- break;
+ case SetCreateHistogramFunction:
+ v8::V8::SetCreateHistogramFunction(NULL);
+ break;
- case SetAddHistogramSampleFunction:
- v8::V8::SetAddHistogramSampleFunction(NULL);
- break;
+ case SetAddHistogramSampleFunction:
+ v8::V8::SetAddHistogramSampleFunction(NULL);
+ break;
}
isolate->Exit();
isolate->Dispose();
@@ -19352,31 +19446,26 @@ static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
TEST(InitializeDefaultIsolateOnSecondaryThread1) {
- InitializeTestHelper(InitDefaultIsolateThread::IgnoreOOM);
-}
-
-
-TEST(InitializeDefaultIsolateOnSecondaryThread2) {
InitializeTestHelper(InitDefaultIsolateThread::SetResourceConstraints);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread3) {
+TEST(InitializeDefaultIsolateOnSecondaryThread2) {
InitializeTestHelper(InitDefaultIsolateThread::SetFatalHandler);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread4) {
+TEST(InitializeDefaultIsolateOnSecondaryThread3) {
InitializeTestHelper(InitDefaultIsolateThread::SetCounterFunction);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread5) {
+TEST(InitializeDefaultIsolateOnSecondaryThread4) {
InitializeTestHelper(InitDefaultIsolateThread::SetCreateHistogramFunction);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread6) {
+TEST(InitializeDefaultIsolateOnSecondaryThread5) {
InitializeTestHelper(InitDefaultIsolateThread::SetAddHistogramSampleFunction);
}
@@ -20491,6 +20580,102 @@ TEST(CallCompletedCallbackTwoExceptions) {
}
+static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext1Calls++;");
+}
+
+
+static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext2Calls++;");
+}
+
+
+TEST(EnqueueMicrotask) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+}
+
+
+TEST(SetAutorunMicrotasks) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), false);
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), true);
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+}
+
+
static int probes_counter = 0;
static int misses_counter = 0;
static int updates_counter = 0;
@@ -21735,8 +21920,9 @@ THREADED_TEST(FunctionNew) {
i::Smi::cast(v8::Utils::OpenHandle(*func)
->shared()->get_api_func_data()->serial_number())->value();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Object* elm = i_isolate->native_context()->function_cache()
- ->GetElementNoExceptionThrown(i_isolate, serial_number);
+ i::Handle<i::JSObject> cache(i_isolate->native_context()->function_cache());
+ i::Handle<i::Object> elm =
+ i::Object::GetElementNoExceptionThrown(i_isolate, cache, serial_number);
CHECK(elm->IsUndefined());
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
@@ -21814,29 +22000,31 @@ class ApiCallOptimizationChecker {
}
CHECK(holder == info.Holder());
count++;
- }
-
- // TODO(dcarney): move this to v8.h
- static void SetAccessorProperty(Local<Object> object,
- Local<String> name,
- Local<Function> getter,
- Local<Function> setter = Local<Function>()) {
- i::Isolate* isolate = CcTest::i_isolate();
- v8::AccessControl settings = v8::DEFAULT;
- v8::PropertyAttribute attribute = v8::None;
- i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
- i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
- if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
- i::JSObject::DefineAccessor(v8::Utils::OpenHandle(*object),
- v8::Utils::OpenHandle(*name),
- getter_i,
- setter_i,
- static_cast<PropertyAttributes>(attribute),
- settings);
+ info.GetReturnValue().Set(v8_str("returned"));
}
public:
- void Run(bool use_signature, bool global) {
+ enum SignatureType {
+ kNoSignature,
+ kSignatureOnReceiver,
+ kSignatureOnPrototype
+ };
+
+ void RunAll() {
+ SignatureType signature_types[] =
+ {kNoSignature, kSignatureOnReceiver, kSignatureOnPrototype};
+ for (unsigned i = 0; i < ARRAY_SIZE(signature_types); i++) {
+ SignatureType signature_type = signature_types[i];
+ for (int j = 0; j < 2; j++) {
+ bool global = j == 0;
+ int key = signature_type +
+ ARRAY_SIZE(signature_types) * (global ? 1 : 0);
+ Run(signature_type, global, key);
+ }
+ }
+ }
+
+ void Run(SignatureType signature_type, bool global, int key) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
// Build a template for signature checks.
@@ -21849,8 +22037,15 @@ class ApiCallOptimizationChecker {
Local<v8::FunctionTemplate> function_template
= FunctionTemplate::New(isolate);
function_template->Inherit(parent_template);
- if (use_signature) {
- signature = v8::Signature::New(isolate, parent_template);
+ switch (signature_type) {
+ case kNoSignature:
+ break;
+ case kSignatureOnReceiver:
+ signature = v8::Signature::New(isolate, function_template);
+ break;
+ case kSignatureOnPrototype:
+ signature = v8::Signature::New(isolate, parent_template);
+ break;
}
signature_template = function_template->InstanceTemplate();
}
@@ -21864,19 +22059,21 @@ class ApiCallOptimizationChecker {
// Get the holder objects.
Local<Object> inner_global =
Local<Object>::Cast(context->Global()->GetPrototype());
- Local<Object> function_holder =
- Local<Object>::Cast(function_receiver->GetPrototype());
- // Install function on hidden prototype object.
+ // Install functions on hidden prototype object if there is one.
data = Object::New(isolate);
Local<FunctionTemplate> function_template = FunctionTemplate::New(
isolate, OptimizationCallback, data, signature);
Local<Function> function = function_template->GetFunction();
- Local<Object> global_holder = Local<Object>::Cast(
- inner_global->GetPrototype());
+ Local<Object> global_holder = inner_global;
+ Local<Object> function_holder = function_receiver;
+ if (signature_type == kSignatureOnPrototype) {
+ function_holder = Local<Object>::Cast(function_holder->GetPrototype());
+ global_holder = Local<Object>::Cast(global_holder->GetPrototype());
+ }
global_holder->Set(v8_str("g_f"), function);
- SetAccessorProperty(global_holder, v8_str("g_acc"), function, function);
+ global_holder->SetAccessorProperty(v8_str("g_acc"), function, function);
function_holder->Set(v8_str("f"), function);
- SetAccessorProperty(function_holder, v8_str("acc"), function, function);
+ function_holder->SetAccessorProperty(v8_str("acc"), function, function);
// Initialize expected values.
callee = function;
count = 0;
@@ -21887,7 +22084,7 @@ class ApiCallOptimizationChecker {
holder = function_receiver;
// If not using a signature, add something else to the prototype chain
// to test the case that holder != receiver
- if (!use_signature) {
+ if (signature_type == kNoSignature) {
receiver = Local<Object>::Cast(CompileRun(
"var receiver_subclass = {};\n"
"receiver_subclass.__proto__ = function_receiver;\n"
@@ -21899,48 +22096,53 @@ class ApiCallOptimizationChecker {
}
}
// With no signature, the holder is not set.
- if (!use_signature) holder = receiver;
+ if (signature_type == kNoSignature) holder = receiver;
// build wrap_function
- int key = (use_signature ? 1 : 0) + 2 * (global ? 1 : 0);
i::ScopedVector<char> wrap_function(200);
if (global) {
i::OS::SNPrintF(
wrap_function,
"function wrap_f_%d() { var f = g_f; return f(); }\n"
"function wrap_get_%d() { return this.g_acc; }\n"
- "function wrap_set_%d() { this.g_acc = 1; }\n",
+ "function wrap_set_%d() { return this.g_acc = 1; }\n",
key, key, key);
} else {
i::OS::SNPrintF(
wrap_function,
"function wrap_f_%d() { return receiver_subclass.f(); }\n"
"function wrap_get_%d() { return receiver_subclass.acc; }\n"
- "function wrap_set_%d() { receiver_subclass.acc = 1; }\n",
+ "function wrap_set_%d() { return receiver_subclass.acc = 1; }\n",
key, key, key);
}
// build source string
- i::ScopedVector<char> source(500);
+ i::ScopedVector<char> source(1000);
i::OS::SNPrintF(
source,
"%s\n" // wrap functions
- "function wrap_f() { wrap_f_%d(); }\n"
- "function wrap_get() { wrap_get_%d(); }\n"
- "function wrap_set() { wrap_set_%d(); }\n"
+ "function wrap_f() { return wrap_f_%d(); }\n"
+ "function wrap_get() { return wrap_get_%d(); }\n"
+ "function wrap_set() { return wrap_set_%d(); }\n"
+ "check = function(returned) {\n"
+ " if (returned !== 'returned') { throw returned; }\n"
+ "}\n"
"\n"
- "wrap_f();\n"
- "wrap_f();\n"
+ "check(wrap_f());\n"
+ "check(wrap_f());\n"
"%%OptimizeFunctionOnNextCall(wrap_f_%d);\n"
- "wrap_f();\n"
+ "check(wrap_f());\n"
"\n"
- "wrap_get();\n"
- "wrap_get();\n"
+ "check(wrap_get());\n"
+ "check(wrap_get());\n"
"%%OptimizeFunctionOnNextCall(wrap_get_%d);\n"
- "wrap_get();\n"
+ "check(wrap_get());\n"
"\n"
- "wrap_set();\n"
- "wrap_set();\n"
+ "check = function(returned) {\n"
+ " if (returned !== 1) { throw returned; }\n"
+ "}\n"
+ "check(wrap_set());\n"
+ "check(wrap_set());\n"
"%%OptimizeFunctionOnNextCall(wrap_set_%d);\n"
- "wrap_set();\n",
+ "check(wrap_set());\n",
wrap_function.start(), key, key, key, key, key, key);
v8::TryCatch try_catch;
CompileRun(source.start());
@@ -21960,10 +22162,161 @@ int ApiCallOptimizationChecker::count = 0;
TEST(TestFunctionCallOptimization) {
i::FLAG_allow_natives_syntax = true;
ApiCallOptimizationChecker checker;
- checker.Run(true, true);
- checker.Run(false, true);
- checker.Run(true, false);
- checker.Run(false, false);
+ checker.RunAll();
+}
+
+
+static const char* last_event_message;
+static int last_event_status;
+void StoringEventLoggerCallback(const char* message, int status) {
+ last_event_message = message;
+ last_event_status = status;
+}
+
+
+TEST(EventLogging) {
+ v8::Isolate* isolate = CcTest::isolate();
+ isolate->SetEventLogger(StoringEventLoggerCallback);
+ v8::internal::HistogramTimer* histogramTimer =
+ new v8::internal::HistogramTimer(
+ "V8.Test", 0, 10000, 50,
+ reinterpret_cast<v8::internal::Isolate*>(isolate));
+ histogramTimer->Start();
+ CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(0, last_event_status);
+ histogramTimer->Stop();
+ CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(1, last_event_status);
+}
+
+
+TEST(Promises) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Handle<Object> global = context->Global();
+
+ // Creation.
+ Handle<v8::Promise::Resolver> pr = v8::Promise::Resolver::New(isolate);
+ Handle<v8::Promise::Resolver> rr = v8::Promise::Resolver::New(isolate);
+ Handle<v8::Promise> p = pr->GetPromise();
+ Handle<v8::Promise> r = rr->GetPromise();
+
+ // IsPromise predicate.
+ CHECK(p->IsPromise());
+ CHECK(r->IsPromise());
+ Handle<Value> o = v8::Object::New(isolate);
+ CHECK(!o->IsPromise());
+
+ // Resolution and rejection.
+ pr->Resolve(v8::Integer::New(isolate, 1));
+ CHECK(p->IsPromise());
+ rr->Reject(v8::Integer::New(isolate, 2));
+ CHECK(r->IsPromise());
+
+ // Chaining non-pending promises.
+ CompileRun(
+ "var x1 = 0;\n"
+ "var x2 = 0;\n"
+ "function f1(x) { x1 = x; return x+1 };\n"
+ "function f2(x) { x2 = x; return x+1 };\n");
+ Handle<Function> f1 = Handle<Function>::Cast(global->Get(v8_str("f1")));
+ Handle<Function> f2 = Handle<Function>::Cast(global->Get(v8_str("f2")));
+
+ p->Chain(f1);
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+
+ p->Catch(f2);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ r->Catch(f2);
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
+
+ r->Chain(f1);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+
+ // Chaining pending promises.
+ CompileRun("x1 = x2 = 0;");
+ pr = v8::Promise::Resolver::New(isolate);
+ rr = v8::Promise::Resolver::New(isolate);
+
+ pr->GetPromise()->Chain(f1);
+ rr->GetPromise()->Catch(f2);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ pr->Resolve(v8::Integer::New(isolate, 1));
+ rr->Reject(v8::Integer::New(isolate, 2));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
+
+ // Multi-chaining.
+ CompileRun("x1 = x2 = 0;");
+ pr = v8::Promise::Resolver::New(isolate);
+ pr->GetPromise()->Chain(f1)->Chain(f2);
+ pr->Resolve(v8::Integer::New(isolate, 3));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
+
+ CompileRun("x1 = x2 = 0;");
+ rr = v8::Promise::Resolver::New(isolate);
+ rr->GetPromise()->Catch(f1)->Chain(f2);
+ rr->Reject(v8::Integer::New(isolate, 3));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
+}
+
+
+TEST(DisallowJavascriptExecutionScope) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope no_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
+ CompileRun("2+2");
+}
+
+
+TEST(AllowJavascriptExecutionScope) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope no_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
+ v8::Isolate::DisallowJavascriptExecutionScope throw_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ { v8::Isolate::AllowJavascriptExecutionScope yes_js(isolate);
+ CompileRun("1+1");
+ }
+}
+
+
+TEST(ThrowOnJavascriptExecution) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch;
+ v8::Isolate::DisallowJavascriptExecutionScope throw_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ CompileRun("1+1");
+ CHECK(try_catch.HasCaught());
}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index b21dc34dc..9c1c04fe3 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1266,6 +1266,10 @@ TEST(15) {
uint32_t dstA1;
uint32_t dstA2;
uint32_t dstA3;
+ uint32_t dstA4;
+ uint32_t dstA5;
+ uint32_t dstA6;
+ uint32_t dstA7;
} T;
T t;
@@ -1291,7 +1295,14 @@ TEST(15) {
__ add(r4, r0, Operand(OFFSET_OF(T, dstA0)));
__ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));
- __ ldm(ia_w, sp, r4.bit() | pc.bit());
+ // The same expansion, but with different source and destination registers.
+ __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
+ __ vld1(Neon8, NeonListOperand(d1), NeonMemOperand(r4));
+ __ vmovl(NeonU8, q1, d1);
+ __ add(r4, r0, Operand(OFFSET_OF(T, dstA4)));
+ __ vst1(Neon8, NeonListOperand(d2, 2), NeonMemOperand(r4));
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
@@ -1326,6 +1337,10 @@ TEST(15) {
t.dstA1 = 0;
t.dstA2 = 0;
t.dstA3 = 0;
+ t.dstA4 = 0;
+ t.dstA5 = 0;
+ t.dstA6 = 0;
+ t.dstA7 = 0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x01020304, t.dst0);
@@ -1340,6 +1355,10 @@ TEST(15) {
CHECK_EQ(0x00410042, t.dstA1);
CHECK_EQ(0x00830084, t.dstA2);
CHECK_EQ(0x00810082, t.dstA3);
+ CHECK_EQ(0x00430044, t.dstA4);
+ CHECK_EQ(0x00410042, t.dstA5);
+ CHECK_EQ(0x00830084, t.dstA6);
+ CHECK_EQ(0x00810082, t.dstA7);
}
}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
new file mode 100644
index 000000000..51c202fc0
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -0,0 +1,10801 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cmath>
+#include <limits>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/simulator-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+#include "test-utils-arm64.h"
+
+using namespace v8::internal;
+
+// Test infrastructure.
+//
+// Tests are functions which accept no parameters and have no return values.
+// The testing code should not perform an explicit return once completed. For
+// example to test the mov immediate instruction a very simple test would be:
+//
+// TEST(mov_x0_one) {
+// SETUP();
+//
+// START();
+// __ mov(x0, Operand(1));
+// END();
+//
+// RUN();
+//
+// ASSERT_EQUAL_64(1, x0);
+//
+// TEARDOWN();
+// }
+//
+// Within a START ... END block all registers but sp can be modified. sp has to
+// be explicitly saved/restored. The END() macro replaces the function return
+// so it may appear multiple times in a test if the test has multiple exit
+// points.
+//
+// Once the test has been run all integer and floating point registers as well
+// as flags are accessible through a RegisterDump instance, see
+// utils-arm64.cc for more info on RegisterDump.
+//
+// We provide some helper asserts to handle common cases:
+//
+// ASSERT_EQUAL_32(int32_t, int32_t)
+// ASSERT_EQUAL_FP32(float, float)
+// ASSERT_EQUAL_32(int32_t, W register)
+// ASSERT_EQUAL_FP32(float, S register)
+// ASSERT_EQUAL_64(int64_t, int64_t)
+// ASSERT_EQUAL_FP64(double, double)
+// ASSERT_EQUAL_64(int64_t, X register)
+// ASSERT_EQUAL_64(X register, X register)
+// ASSERT_EQUAL_FP64(double, D register)
+//
+// e.g. ASSERT_EQUAL_FP64(0.5, d30);
+//
+// If more advanced computation is required before the assert then access the
+// RegisterDump named core directly:
+//
+// ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
+
+
+#if 0 // TODO(all): enable.
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+#endif
+
+#define __ masm.
+
+#define BUF_SIZE 8192
+#define SETUP() SETUP_SIZE(BUF_SIZE)
+
+#define INIT_V8() \
+ CcTest::InitializeVM(); \
+
+#ifdef USE_SIMULATOR
+
+// Run tests with the simulator.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ PrintDisassembler* pdis = NULL; \
+ RegisterDump core;
+
+/* if (Cctest::trace_sim()) { \
+ pdis = new PrintDisassembler(stdout); \
+ decoder.PrependVisitor(pdis); \
+ } \
+ */
+
+// Reset the assembler and simulator, so that instructions can be generated,
+// but don't actually emit any code. This can be used by tests that need to
+// emit instructions at the start of the buffer. Note that START_AFTER_RESET
+// must be called before any callee-saved register is modified, and before an
+// END is encountered.
+//
+// Most tests should call START, rather than call RESET directly.
+#define RESET() \
+ __ Reset(); \
+ simulator.ResetState();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters(); \
+ __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete pdis; \
+ delete[] buf;
+
+#else // ifdef USE_SIMULATOR.
+// Run the test on real hardware or models.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ RegisterDump core; \
+ CPU::SetUp();
+
+#define RESET() \
+ __ Reset();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters();
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
+ }
+
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete[] buf;
+
+#endif // ifdef USE_SIMULATOR.
+
+#define ASSERT_EQUAL_NZCV(expected) \
+ CHECK(EqualNzcv(expected, core.flags_nzcv()))
+
+#define ASSERT_EQUAL_REGISTERS(expected) \
+ CHECK(EqualRegisters(&expected, &core))
+
+#define ASSERT_EQUAL_32(expected, result) \
+ CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
+
+#define ASSERT_EQUAL_FP32(expected, result) \
+ CHECK(EqualFP32(expected, &core, result))
+
+#define ASSERT_EQUAL_64(expected, result) \
+ CHECK(Equal64(expected, &core, result))
+
+#define ASSERT_EQUAL_FP64(expected, result) \
+ CHECK(EqualFP64(expected, &core, result))
+
+#ifdef DEBUG
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ CHECK((expected) == (__ LiteralPoolSize()))
+#else
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ ((void) 0)
+#endif
+
+
+TEST(stack_ops) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // save csp.
+ __ Mov(x29, csp);
+
+ // Set the csp to a known value.
+ __ Mov(x16, 0x1000);
+ __ Mov(csp, x16);
+ __ Mov(x0, csp);
+
+ // Add immediate to the csp, and move the result to a normal register.
+ __ Add(csp, csp, Operand(0x50));
+ __ Mov(x1, csp);
+
+ // Add extended to the csp, and move the result to a normal register.
+ __ Mov(x17, 0xfff);
+ __ Add(csp, csp, Operand(x17, SXTB));
+ __ Mov(x2, csp);
+
+  // Create a csp using a logical instruction, and move to normal register.
+ __ Orr(csp, xzr, Operand(0x1fff));
+ __ Mov(x3, csp);
+
+ // Write wcsp using a logical instruction.
+ __ Orr(wcsp, wzr, Operand(0xfffffff8L));
+ __ Mov(x4, csp);
+
+ // Write csp, and read back wcsp.
+ __ Orr(csp, xzr, Operand(0xfffffff8L));
+ __ Mov(w5, wcsp);
+
+ // restore csp.
+ __ Mov(csp, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1000, x0);
+ ASSERT_EQUAL_64(0x1050, x1);
+ ASSERT_EQUAL_64(0x104f, x2);
+ ASSERT_EQUAL_64(0x1fff, x3);
+ ASSERT_EQUAL_64(0xfffffff8, x4);
+ ASSERT_EQUAL_64(0xfffffff8, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(mvn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mvn(w0, 0xfff);
+ __ Mvn(x1, 0xfff);
+ __ Mvn(w2, Operand(w0, LSL, 1));
+ __ Mvn(x3, Operand(x1, LSL, 2));
+ __ Mvn(w4, Operand(w0, LSR, 3));
+ __ Mvn(x5, Operand(x1, LSR, 4));
+ __ Mvn(w6, Operand(w0, ASR, 11));
+ __ Mvn(x7, Operand(x1, ASR, 12));
+ __ Mvn(w8, Operand(w0, ROR, 13));
+ __ Mvn(x9, Operand(x1, ROR, 14));
+ __ Mvn(w10, Operand(w2, UXTB));
+ __ Mvn(x11, Operand(x2, SXTB, 1));
+ __ Mvn(w12, Operand(w2, UXTH, 2));
+ __ Mvn(x13, Operand(x2, SXTH, 3));
+ __ Mvn(x14, Operand(w2, UXTW, 4));
+ __ Mvn(x15, Operand(w2, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffff000, x0);
+ ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
+ ASSERT_EQUAL_64(0x00001fff, x2);
+ ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
+ ASSERT_EQUAL_64(0xe00001ff, x4);
+ ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
+ ASSERT_EQUAL_64(0x00000001, x6);
+ ASSERT_EQUAL_64(0x0, x7);
+ ASSERT_EQUAL_64(0x7ff80000, x8);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
+ ASSERT_EQUAL_64(0xffffff00, x10);
+ ASSERT_EQUAL_64(0x0000000000000001UL, x11);
+ ASSERT_EQUAL_64(0xffff8003, x12);
+ ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(mov) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Mov(x0, 0x0123456789abcdefL);
+
+ __ movz(x1, 0xabcdL << 16);
+ __ movk(x2, 0xabcdL << 32);
+ __ movn(x3, 0xabcdL << 48);
+
+ __ Mov(x4, 0x0123456789abcdefL);
+ __ Mov(x5, x4);
+
+ __ Mov(w6, -1);
+
+ // Test that moves back to the same register have the desired effect. This
+ // is a no-op for X registers, and a truncation for W registers.
+ __ Mov(x7, 0x0123456789abcdefL);
+ __ Mov(x7, x7);
+ __ Mov(x8, 0x0123456789abcdefL);
+ __ Mov(w8, w8);
+ __ Mov(x9, 0x0123456789abcdefL);
+ __ Mov(x9, Operand(x9));
+ __ Mov(x10, 0x0123456789abcdefL);
+ __ Mov(w10, Operand(w10));
+
+ __ Mov(w11, 0xfff);
+ __ Mov(x12, 0xfff);
+ __ Mov(w13, Operand(w11, LSL, 1));
+ __ Mov(x14, Operand(x12, LSL, 2));
+ __ Mov(w15, Operand(w11, LSR, 3));
+ __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(w19, Operand(w11, ASR, 11));
+ __ Mov(x20, Operand(x12, ASR, 12));
+ __ Mov(w21, Operand(w11, ROR, 13));
+ __ Mov(x22, Operand(x12, ROR, 14));
+ __ Mov(w23, Operand(w13, UXTB));
+ __ Mov(x24, Operand(x13, SXTB, 1));
+ __ Mov(w25, Operand(w13, UXTH, 2));
+ __ Mov(x26, Operand(x13, SXTH, 3));
+ __ Mov(x27, Operand(w13, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
+ ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
+ ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
+ ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
+ ASSERT_EQUAL_64(x4, x5);
+ ASSERT_EQUAL_32(-1, w6);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
+ ASSERT_EQUAL_32(0x89abcdefL, w8);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
+ ASSERT_EQUAL_32(0x89abcdefL, w10);
+ ASSERT_EQUAL_64(0x00000fff, x11);
+ ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
+ ASSERT_EQUAL_64(0x00001ffe, x13);
+ ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
+ ASSERT_EQUAL_64(0x000001ff, x15);
+ ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
+ ASSERT_EQUAL_64(0x00000001, x19);
+ ASSERT_EQUAL_64(0x0, x20);
+ ASSERT_EQUAL_64(0x7ff80000, x21);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
+ ASSERT_EQUAL_64(0x000000fe, x23);
+ ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
+ ASSERT_EQUAL_64(0x00007ff8, x25);
+ ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
+ ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_w) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0xffffffffL);
+ __ Mov(w1, 0xffff1234L);
+ __ Mov(w2, 0x1234ffffL);
+ __ Mov(w3, 0x00000000L);
+ __ Mov(w4, 0x00001234L);
+ __ Mov(w5, 0x12340000L);
+ __ Mov(w6, 0x12345678L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffL, x0);
+ ASSERT_EQUAL_64(0xffff1234L, x1);
+ ASSERT_EQUAL_64(0x1234ffffL, x2);
+ ASSERT_EQUAL_64(0x00000000L, x3);
+ ASSERT_EQUAL_64(0x00001234L, x4);
+ ASSERT_EQUAL_64(0x12340000L, x5);
+ ASSERT_EQUAL_64(0x12345678L, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_x) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffff1234L);
+ __ Mov(x2, 0xffffffff12345678L);
+ __ Mov(x3, 0xffff1234ffff5678L);
+ __ Mov(x4, 0x1234ffffffff5678L);
+ __ Mov(x5, 0x1234ffff5678ffffL);
+ __ Mov(x6, 0x12345678ffffffffL);
+ __ Mov(x7, 0x1234ffffffffffffL);
+ __ Mov(x8, 0x123456789abcffffL);
+ __ Mov(x9, 0x12345678ffff9abcL);
+ __ Mov(x10, 0x1234ffff56789abcL);
+ __ Mov(x11, 0xffff123456789abcL);
+ __ Mov(x12, 0x0000000000000000L);
+ __ Mov(x13, 0x0000000000001234L);
+ __ Mov(x14, 0x0000000012345678L);
+ __ Mov(x15, 0x0000123400005678L);
+ __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x19, 0x1234000056780000L);
+ __ Mov(x20, 0x1234567800000000L);
+ __ Mov(x21, 0x1234000000000000L);
+ __ Mov(x22, 0x123456789abc0000L);
+ __ Mov(x23, 0x1234567800009abcL);
+ __ Mov(x24, 0x1234000056789abcL);
+ __ Mov(x25, 0x0000123456789abcL);
+ __ Mov(x26, 0x123456789abcdef0L);
+ __ Mov(x27, 0xffff000000000001L);
+ __ Mov(x28, 0x8000ffff00000000L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
+ ASSERT_EQUAL_64(0xffffffff12345678L, x2);
+ ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
+ ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
+ ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
+ ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
+ ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
+ ASSERT_EQUAL_64(0x123456789abcffffL, x8);
+ ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
+ ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
+ ASSERT_EQUAL_64(0xffff123456789abcL, x11);
+ ASSERT_EQUAL_64(0x0000000000000000L, x12);
+ ASSERT_EQUAL_64(0x0000000000001234L, x13);
+ ASSERT_EQUAL_64(0x0000000012345678L, x14);
+ ASSERT_EQUAL_64(0x0000123400005678L, x15);
+ ASSERT_EQUAL_64(0x1234000000005678L, x18);
+ ASSERT_EQUAL_64(0x1234000056780000L, x19);
+ ASSERT_EQUAL_64(0x1234567800000000L, x20);
+ ASSERT_EQUAL_64(0x1234000000000000L, x21);
+ ASSERT_EQUAL_64(0x123456789abc0000L, x22);
+ ASSERT_EQUAL_64(0x1234567800009abcL, x23);
+ ASSERT_EQUAL_64(0x1234000056789abcL, x24);
+ ASSERT_EQUAL_64(0x0000123456789abcL, x25);
+ ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
+ ASSERT_EQUAL_64(0xffff000000000001L, x27);
+ ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
+
+ TEARDOWN();
+}
+
+
+TEST(orr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orr(x2, x0, Operand(x1));
+ __ Orr(w3, w0, Operand(w1, LSL, 28));
+ __ Orr(x4, x0, Operand(x1, LSL, 32));
+ __ Orr(x5, x0, Operand(x1, LSR, 4));
+ __ Orr(w6, w0, Operand(w1, ASR, 4));
+ __ Orr(x7, x0, Operand(x1, ASR, 4));
+ __ Orr(w8, w0, Operand(w1, ROR, 12));
+ __ Orr(x9, x0, Operand(x1, ROR, 12));
+ __ Orr(w10, w0, Operand(0xf));
+ __ Orr(x11, x0, Operand(0xf0000000f0000000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000f0ff, x2);
+ ASSERT_EQUAL_64(0xf000f0f0, x3);
+ ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
+ ASSERT_EQUAL_64(0x0f00f0ff, x5);
+ ASSERT_EQUAL_64(0xff00f0ff, x6);
+ ASSERT_EQUAL_64(0x0f00f0ff, x7);
+ ASSERT_EQUAL_64(0x0ffff0f0, x8);
+ ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
+ ASSERT_EQUAL_64(0xf0ff, x10);
+ ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orr_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008080UL);
+ __ Orr(w6, w0, Operand(w1, UXTB));
+ __ Orr(x7, x0, Operand(x1, UXTH, 1));
+ __ Orr(w8, w0, Operand(w1, UXTW, 2));
+ __ Orr(x9, x0, Operand(x1, UXTX, 3));
+ __ Orr(w10, w0, Operand(w1, SXTB));
+ __ Orr(x11, x0, Operand(x1, SXTH, 1));
+ __ Orr(x12, x0, Operand(x1, SXTW, 2));
+ __ Orr(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010101, x7);
+ ASSERT_EQUAL_64(0x00020201, x8);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x9);
+ ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bitwise_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
+
+ __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Orr(w11, w1, Operand(0x90abcdef));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orn(x2, x0, Operand(x1));
+ __ Orn(w3, w0, Operand(w1, LSL, 4));
+ __ Orn(x4, x0, Operand(x1, LSL, 4));
+ __ Orn(x5, x0, Operand(x1, LSR, 1));
+ __ Orn(w6, w0, Operand(w1, ASR, 1));
+ __ Orn(x7, x0, Operand(x1, ASR, 1));
+ __ Orn(w8, w0, Operand(w1, ROR, 16));
+ __ Orn(x9, x0, Operand(x1, ROR, 16));
+ __ Orn(w10, w0, Operand(0xffff));
+ __ Orn(x11, x0, Operand(0xffff0000ffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
+ ASSERT_EQUAL_64(0xfffff0ff, x3);
+ ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
+ ASSERT_EQUAL_64(0x07fffff0, x6);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
+ ASSERT_EQUAL_64(0xff00ffff, x8);
+ ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
+ ASSERT_EQUAL_64(0xfffff0f0, x10);
+ ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Orn(w6, w0, Operand(w1, UXTB));
+ __ Orn(x7, x0, Operand(x1, UXTH, 1));
+ __ Orn(w8, w0, Operand(w1, UXTW, 2));
+ __ Orn(x9, x0, Operand(x1, UXTX, 3));
+ __ Orn(w10, w0, Operand(w1, SXTB));
+ __ Orn(x11, x0, Operand(x1, SXTH, 1));
+ __ Orn(x12, x0, Operand(x1, SXTW, 2));
+ __ Orn(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7f, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007f, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(and_) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ And(x2, x0, Operand(x1));
+ __ And(w3, w0, Operand(w1, LSL, 4));
+ __ And(x4, x0, Operand(x1, LSL, 4));
+ __ And(x5, x0, Operand(x1, LSR, 1));
+ __ And(w6, w0, Operand(w1, ASR, 20));
+ __ And(x7, x0, Operand(x1, ASR, 20));
+ __ And(w8, w0, Operand(w1, ROR, 28));
+ __ And(x9, x0, Operand(x1, ROR, 28));
+ __ And(w10, w0, Operand(0xff00));
+ __ And(x11, x0, Operand(0xff));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x000000f0, x2);
+ ASSERT_EQUAL_64(0x00000ff0, x3);
+ ASSERT_EQUAL_64(0x00000ff0, x4);
+ ASSERT_EQUAL_64(0x00000070, x5);
+ ASSERT_EQUAL_64(0x0000ff00, x6);
+ ASSERT_EQUAL_64(0x00000f00, x7);
+ ASSERT_EQUAL_64(0x00000ff0, x8);
+ ASSERT_EQUAL_64(0x00000000, x9);
+ ASSERT_EQUAL_64(0x0000ff00, x10);
+ ASSERT_EQUAL_64(0x000000f0, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(and_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ And(w6, w0, Operand(w1, UXTB));
+ __ And(x7, x0, Operand(x1, UXTH, 1));
+ __ And(w8, w0, Operand(w1, UXTW, 2));
+ __ And(x9, x0, Operand(x1, UXTX, 3));
+ __ And(w10, w0, Operand(w1, SXTB));
+ __ And(x11, x0, Operand(x1, SXTH, 1));
+ __ And(x12, x0, Operand(x1, SXTW, 2));
+ __ And(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010102, x7);
+ ASSERT_EQUAL_64(0x00020204, x8);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x9);
+ ASSERT_EQUAL_64(0xffffff81, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(ands) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xf00000ff, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w0, Operand(w1, LSR, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Ands(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Ands(w0, w0, Operand(0xf));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xff000000);
+ __ Ands(w0, w0, Operand(0x80000000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(bic) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Bic(x2, x0, Operand(x1));
+ __ Bic(w3, w0, Operand(w1, LSL, 4));
+ __ Bic(x4, x0, Operand(x1, LSL, 4));
+ __ Bic(x5, x0, Operand(x1, LSR, 1));
+ __ Bic(w6, w0, Operand(w1, ASR, 20));
+ __ Bic(x7, x0, Operand(x1, ASR, 20));
+ __ Bic(w8, w0, Operand(w1, ROR, 28));
+ __ Bic(x9, x0, Operand(x1, ROR, 24));
+ __ Bic(x10, x0, Operand(0x1f));
+ __ Bic(x11, x0, Operand(0x100));
+
+ // Test bic into csp when the constant cannot be encoded in the immediate
+ // field.
+ // Use x20 to preserve csp. We check for the result via x21 because the
+ // test infrastructure requires that csp be restored to its original value.
+ __ Mov(x20, csp);
+ __ Mov(x0, 0xffffff);
+ __ Bic(csp, x0, Operand(0xabcdef));
+ __ Mov(x21, csp);
+ __ Mov(csp, x20);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000ff00, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000f000, x4);
+ ASSERT_EQUAL_64(0x0000ff80, x5);
+ ASSERT_EQUAL_64(0x000000f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f000, x8);
+ ASSERT_EQUAL_64(0x0000ff00, x9);
+ ASSERT_EQUAL_64(0x0000ffe0, x10);
+ ASSERT_EQUAL_64(0x0000fef0, x11);
+
+ ASSERT_EQUAL_64(0x543210, x21);
+
+ TEARDOWN();
+}
+
+
+// BIC with extended-register operands: each of the UXTB/UXTH/UXTW/UXTX and
+// SXTB/SXTH/SXTW/SXTX extend modes, some combined with a left shift, applied
+// to an all-ones first operand so the result exposes the extended value.
+TEST(bic_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0xffffffffffffffffUL);
+  __ Mov(x1, 0x8000000080008081UL);
+  __ Bic(w6, w0, Operand(w1, UXTB));
+  __ Bic(x7, x0, Operand(x1, UXTH, 1));
+  __ Bic(w8, w0, Operand(w1, UXTW, 2));
+  __ Bic(x9, x0, Operand(x1, UXTX, 3));
+  __ Bic(w10, w0, Operand(w1, SXTB));
+  __ Bic(x11, x0, Operand(x1, SXTH, 1));
+  __ Bic(x12, x0, Operand(x1, SXTW, 2));
+  __ Bic(x13, x0, Operand(x1, SXTX, 3));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffff7e, x6);
+  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+  ASSERT_EQUAL_64(0xfffdfdfb, x8);
+  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+  ASSERT_EQUAL_64(0x0000007e, x10);
+  ASSERT_EQUAL_64(0x0000fefd, x11);
+  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+  TEARDOWN();
+}
+
+
+// BICS (bit clear, setting flags): checks both the result value and the NZCV
+// flags for zero and negative outcomes, in 32- and 64-bit widths, with
+// register, shifted-register and immediate operands.
+TEST(bics) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  // x & ~x == 0, so Z must be set.
+  __ Mov(x1, 0xffff);
+  __ Bics(w0, w1, Operand(w1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZFlag);
+  ASSERT_EQUAL_64(0x00000000, x0);
+
+  START();
+  __ Mov(x0, 0xffffffff);
+  __ Bics(w0, w0, Operand(w0, LSR, 1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x80000000, x0);
+
+  START();
+  __ Mov(x0, 0x8000000000000000L);
+  __ Mov(x1, 0x00000001);
+  __ Bics(x0, x0, Operand(x1, ROR, 1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZFlag);
+  ASSERT_EQUAL_64(0x00000000, x0);
+
+  START();
+  __ Mov(x0, 0xffffffffffffffffL);
+  __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+  START();
+  __ Mov(w0, 0xffff0000);
+  __ Bics(w0, w0, Operand(0xfffffff0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZFlag);
+  ASSERT_EQUAL_64(0x00000000, x0);
+
+  TEARDOWN();
+}
+
+
+// EOR (exclusive OR): register, shifted-register (LSL/LSR/ASR/ROR) and
+// logical-immediate operand forms, in both 32- and 64-bit widths.
+TEST(eor) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0xfff0);
+  __ Mov(x1, 0xf00000ff);
+
+  __ Eor(x2, x0, Operand(x1));
+  __ Eor(w3, w0, Operand(w1, LSL, 4));
+  __ Eor(x4, x0, Operand(x1, LSL, 4));
+  __ Eor(x5, x0, Operand(x1, LSR, 1));
+  __ Eor(w6, w0, Operand(w1, ASR, 20));
+  __ Eor(x7, x0, Operand(x1, ASR, 20));
+  __ Eor(w8, w0, Operand(w1, ROR, 28));
+  __ Eor(x9, x0, Operand(x1, ROR, 28));
+  __ Eor(w10, w0, Operand(0xff00ff00));
+  __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xf000ff0f, x2);
+  ASSERT_EQUAL_64(0x0000f000, x3);
+  ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
+  ASSERT_EQUAL_64(0x7800ff8f, x5);
+  ASSERT_EQUAL_64(0xffff00f0, x6);
+  ASSERT_EQUAL_64(0x0000f0f0, x7);
+  ASSERT_EQUAL_64(0x0000f00f, x8);
+  ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
+  ASSERT_EQUAL_64(0xff0000f0, x10);
+  ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
+
+  TEARDOWN();
+}
+
+
+// EOR with extended-register operands: all eight extend modes, some with a
+// left shift, against a 0x11... pattern so every result nibble is visible.
+TEST(eor_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0x1111111111111111UL);
+  __ Mov(x1, 0x8000000080008081UL);
+  __ Eor(w6, w0, Operand(w1, UXTB));
+  __ Eor(x7, x0, Operand(x1, UXTH, 1));
+  __ Eor(w8, w0, Operand(w1, UXTW, 2));
+  __ Eor(x9, x0, Operand(x1, UXTX, 3));
+  __ Eor(w10, w0, Operand(w1, SXTB));
+  __ Eor(x11, x0, Operand(x1, SXTH, 1));
+  __ Eor(x12, x0, Operand(x1, SXTW, 2));
+  __ Eor(x13, x0, Operand(x1, SXTX, 3));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x11111190, x6);
+  ASSERT_EQUAL_64(0x1111111111101013UL, x7);
+  ASSERT_EQUAL_64(0x11131315, x8);
+  ASSERT_EQUAL_64(0x1111111511151519UL, x9);
+  ASSERT_EQUAL_64(0xeeeeee90, x10);
+  ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
+  ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
+  ASSERT_EQUAL_64(0x1111111511151519UL, x13);
+
+  TEARDOWN();
+}
+
+
+// EON (exclusive OR NOT): rd = rn ^ ~operand. Register, shifted-register and
+// immediate operand forms, in both 32- and 64-bit widths.
+TEST(eon) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0xfff0);
+  __ Mov(x1, 0xf00000ff);
+
+  __ Eon(x2, x0, Operand(x1));
+  __ Eon(w3, w0, Operand(w1, LSL, 4));
+  __ Eon(x4, x0, Operand(x1, LSL, 4));
+  __ Eon(x5, x0, Operand(x1, LSR, 1));
+  __ Eon(w6, w0, Operand(w1, ASR, 20));
+  __ Eon(x7, x0, Operand(x1, ASR, 20));
+  __ Eon(w8, w0, Operand(w1, ROR, 28));
+  __ Eon(x9, x0, Operand(x1, ROR, 28));
+  __ Eon(w10, w0, Operand(0x03c003c0));
+  __ Eon(x11, x0, Operand(0x0000100000001000L));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
+  ASSERT_EQUAL_64(0xffff0fff, x3);
+  ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
+  ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
+  ASSERT_EQUAL_64(0x0000ff0f, x6);
+  ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
+  ASSERT_EQUAL_64(0xffff0ff0, x8);
+  ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
+  ASSERT_EQUAL_64(0xfc3f03cf, x10);
+  ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
+
+  TEARDOWN();
+}
+
+
+// EON with extended-register operands: all eight extend modes, some with a
+// left shift.
+TEST(eon_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0x1111111111111111UL);
+  __ Mov(x1, 0x8000000080008081UL);
+  __ Eon(w6, w0, Operand(w1, UXTB));
+  __ Eon(x7, x0, Operand(x1, UXTH, 1));
+  __ Eon(w8, w0, Operand(w1, UXTW, 2));
+  __ Eon(x9, x0, Operand(x1, UXTX, 3));
+  __ Eon(w10, w0, Operand(w1, SXTB));
+  __ Eon(x11, x0, Operand(x1, SXTH, 1));
+  __ Eon(x12, x0, Operand(x1, SXTW, 2));
+  __ Eon(x13, x0, Operand(x1, SXTX, 3));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xeeeeee6f, x6);
+  ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
+  ASSERT_EQUAL_64(0xeeececea, x8);
+  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
+  ASSERT_EQUAL_64(0x1111116f, x10);
+  ASSERT_EQUAL_64(0x111111111111efecUL, x11);
+  ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
+  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+
+  TEARDOWN();
+}
+
+
+// MUL, SMULL and MNEG over the edge values 0, 1, 0xffffffff and -1 (all
+// ones), in both 32- and 64-bit widths, checking wrap-around of the
+// truncated products and sign-extension of the SMULL results.
+TEST(mul) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 0);
+  __ Mov(x17, 1);
+  __ Mov(x18, 0xffffffff);
+  __ Mov(x19, 0xffffffffffffffffUL);
+
+  __ Mul(w0, w16, w16);
+  __ Mul(w1, w16, w17);
+  __ Mul(w2, w17, w18);
+  __ Mul(w3, w18, w19);
+  __ Mul(x4, x16, x16);
+  __ Mul(x5, x17, x18);
+  __ Mul(x6, x18, x19);
+  __ Mul(x7, x19, x19);
+  __ Smull(x8, w17, w18);
+  __ Smull(x9, w18, w18);
+  __ Smull(x10, w19, w19);
+  __ Mneg(w11, w16, w16);
+  __ Mneg(w12, w16, w17);
+  __ Mneg(w13, w17, w18);
+  __ Mneg(w14, w18, w19);
+  __ Mneg(x20, x16, x16);
+  __ Mneg(x21, x17, x18);
+  __ Mneg(x22, x18, x19);
+  __ Mneg(x23, x19, x19);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x0);
+  ASSERT_EQUAL_64(0, x1);
+  ASSERT_EQUAL_64(0xffffffff, x2);
+  ASSERT_EQUAL_64(1, x3);
+  ASSERT_EQUAL_64(0, x4);
+  ASSERT_EQUAL_64(0xffffffff, x5);
+  ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
+  ASSERT_EQUAL_64(1, x7);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(1, x10);
+  ASSERT_EQUAL_64(0, x11);
+  ASSERT_EQUAL_64(0, x12);
+  ASSERT_EQUAL_64(1, x13);
+  ASSERT_EQUAL_64(0xffffffff, x14);
+  ASSERT_EQUAL_64(0, x20);
+  ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
+  ASSERT_EQUAL_64(0xffffffff, x22);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
+
+  TEARDOWN();
+}
+
+
+// Runs a single SMULL (signed multiply long: x2 = w0 * w1, sign-extended to
+// 64 bits) and checks the result against |expected|. Caller must have run
+// INIT_V8() first; a and b are truncated to 32 bits by the Mov into w
+// registers.
+static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
+  SETUP();
+  START();
+  __ Mov(w0, a);
+  __ Mov(w1, b);
+  __ Smull(x2, w0, w1);
+  END();
+  RUN();
+  ASSERT_EQUAL_64(expected, x2);
+  TEARDOWN();
+}
+
+
+// SMULL edge cases via SmullHelper: zero, unit, sign combinations, the
+// INT32_MIN operand, and a product that sets only bit 31 of the low word.
+TEST(smull) {
+  INIT_V8();
+  SmullHelper(0, 0, 0);
+  SmullHelper(1, 1, 1);
+  SmullHelper(-1, -1, 1);
+  SmullHelper(1, -1, -1);
+  SmullHelper(0xffffffff80000000, 0x80000000, 1);
+  SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
+}
+
+
+// MADD (multiply-add: rd = ra + rn * rm) over combinations of 0, 1,
+// 0xffffffff and all-ones operands, in both 32- and 64-bit widths.
+TEST(madd) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 0);
+  __ Mov(x17, 1);
+  __ Mov(x18, 0xffffffff);
+  __ Mov(x19, 0xffffffffffffffffUL);
+
+  __ Madd(w0, w16, w16, w16);
+  __ Madd(w1, w16, w16, w17);
+  __ Madd(w2, w16, w16, w18);
+  __ Madd(w3, w16, w16, w19);
+  __ Madd(w4, w16, w17, w17);
+  __ Madd(w5, w17, w17, w18);
+  __ Madd(w6, w17, w17, w19);
+  __ Madd(w7, w17, w18, w16);
+  __ Madd(w8, w17, w18, w18);
+  __ Madd(w9, w18, w18, w17);
+  __ Madd(w10, w18, w19, w18);
+  __ Madd(w11, w19, w19, w19);
+
+  __ Madd(x12, x16, x16, x16);
+  __ Madd(x13, x16, x16, x17);
+  __ Madd(x14, x16, x16, x18);
+  __ Madd(x15, x16, x16, x19);
+  __ Madd(x20, x16, x17, x17);
+  __ Madd(x21, x17, x17, x18);
+  __ Madd(x22, x17, x17, x19);
+  __ Madd(x23, x17, x18, x16);
+  __ Madd(x24, x17, x18, x18);
+  __ Madd(x25, x18, x18, x17);
+  __ Madd(x26, x18, x19, x18);
+  __ Madd(x27, x19, x19, x19);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(0xffffffff, x2);
+  ASSERT_EQUAL_64(0xffffffff, x3);
+  ASSERT_EQUAL_64(1, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0, x6);
+  ASSERT_EQUAL_64(0xffffffff, x7);
+  ASSERT_EQUAL_64(0xfffffffe, x8);
+  ASSERT_EQUAL_64(2, x9);
+  ASSERT_EQUAL_64(0, x10);
+  ASSERT_EQUAL_64(0, x11);
+
+  ASSERT_EQUAL_64(0, x12);
+  ASSERT_EQUAL_64(1, x13);
+  ASSERT_EQUAL_64(0xffffffff, x14);
+  ASSERT_EQUAL_64(0xffffffffffffffff, x15);
+  ASSERT_EQUAL_64(1, x20);
+  ASSERT_EQUAL_64(0x100000000UL, x21);
+  ASSERT_EQUAL_64(0, x22);
+  ASSERT_EQUAL_64(0xffffffff, x23);
+  ASSERT_EQUAL_64(0x1fffffffe, x24);
+  ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
+  ASSERT_EQUAL_64(0, x26);
+  ASSERT_EQUAL_64(0, x27);
+
+  TEARDOWN();
+}
+
+
+// MSUB (multiply-subtract: rd = ra - rn * rm) with the same operand matrix
+// as TEST(madd), in both 32- and 64-bit widths.
+TEST(msub) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 0);
+  __ Mov(x17, 1);
+  __ Mov(x18, 0xffffffff);
+  __ Mov(x19, 0xffffffffffffffffUL);
+
+  __ Msub(w0, w16, w16, w16);
+  __ Msub(w1, w16, w16, w17);
+  __ Msub(w2, w16, w16, w18);
+  __ Msub(w3, w16, w16, w19);
+  __ Msub(w4, w16, w17, w17);
+  __ Msub(w5, w17, w17, w18);
+  __ Msub(w6, w17, w17, w19);
+  __ Msub(w7, w17, w18, w16);
+  __ Msub(w8, w17, w18, w18);
+  __ Msub(w9, w18, w18, w17);
+  __ Msub(w10, w18, w19, w18);
+  __ Msub(w11, w19, w19, w19);
+
+  __ Msub(x12, x16, x16, x16);
+  __ Msub(x13, x16, x16, x17);
+  __ Msub(x14, x16, x16, x18);
+  __ Msub(x15, x16, x16, x19);
+  __ Msub(x20, x16, x17, x17);
+  __ Msub(x21, x17, x17, x18);
+  __ Msub(x22, x17, x17, x19);
+  __ Msub(x23, x17, x18, x16);
+  __ Msub(x24, x17, x18, x18);
+  __ Msub(x25, x18, x18, x17);
+  __ Msub(x26, x18, x19, x18);
+  __ Msub(x27, x19, x19, x19);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(0xffffffff, x2);
+  ASSERT_EQUAL_64(0xffffffff, x3);
+  ASSERT_EQUAL_64(1, x4);
+  ASSERT_EQUAL_64(0xfffffffe, x5);
+  ASSERT_EQUAL_64(0xfffffffe, x6);
+  ASSERT_EQUAL_64(1, x7);
+  ASSERT_EQUAL_64(0, x8);
+  ASSERT_EQUAL_64(0, x9);
+  ASSERT_EQUAL_64(0xfffffffe, x10);
+  ASSERT_EQUAL_64(0xfffffffe, x11);
+
+  ASSERT_EQUAL_64(0, x12);
+  ASSERT_EQUAL_64(1, x13);
+  ASSERT_EQUAL_64(0xffffffff, x14);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
+  ASSERT_EQUAL_64(1, x20);
+  ASSERT_EQUAL_64(0xfffffffeUL, x21);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
+  ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
+  ASSERT_EQUAL_64(0, x24);
+  ASSERT_EQUAL_64(0x200000000UL, x25);
+  ASSERT_EQUAL_64(0x1fffffffeUL, x26);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
+
+  TEARDOWN();
+}
+
+
+// SMULH (signed multiply high: upper 64 bits of the 128-bit signed product),
+// covering zero, small, large-positive and negative operands including
+// INT64_MIN and the 0x55../0xaa.. bit patterns.
+TEST(smulh) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x20, 0);
+  __ Mov(x21, 1);
+  __ Mov(x22, 0x0000000100000000L);
+  __ Mov(x23, 0x12345678);
+  __ Mov(x24, 0x0123456789abcdefL);
+  __ Mov(x25, 0x0000000200000000L);
+  __ Mov(x26, 0x8000000000000000UL);
+  __ Mov(x27, 0xffffffffffffffffUL);
+  __ Mov(x28, 0x5555555555555555UL);
+  __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
+
+  __ Smulh(x0, x20, x24);
+  __ Smulh(x1, x21, x24);
+  __ Smulh(x2, x22, x23);
+  __ Smulh(x3, x22, x24);
+  __ Smulh(x4, x24, x25);
+  __ Smulh(x5, x23, x27);
+  __ Smulh(x6, x26, x26);
+  __ Smulh(x7, x26, x27);
+  __ Smulh(x8, x27, x27);
+  __ Smulh(x9, x28, x28);
+  __ Smulh(x10, x28, x29);
+  __ Smulh(x11, x29, x29);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x0);
+  ASSERT_EQUAL_64(0, x1);
+  ASSERT_EQUAL_64(0, x2);
+  ASSERT_EQUAL_64(0x01234567, x3);
+  ASSERT_EQUAL_64(0x02468acf, x4);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
+  ASSERT_EQUAL_64(0x4000000000000000UL, x6);
+  ASSERT_EQUAL_64(0, x7);
+  ASSERT_EQUAL_64(0, x8);
+  ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
+  ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
+  ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+
+  TEARDOWN();
+}
+
+
+// SMADDL and UMADDL (signed/unsigned multiply-add long: xd = xa + wn * wm,
+// with the 32-bit product sign- or zero-extended). The 0xffffffff operand
+// distinguishes the two: -1 for the signed forms vs 2^32-1 for the unsigned.
+TEST(smaddl_umaddl) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x17, 1);
+  __ Mov(x18, 0xffffffff);
+  __ Mov(x19, 0xffffffffffffffffUL);
+  __ Mov(x20, 4);
+  __ Mov(x21, 0x200000000UL);
+
+  __ Smaddl(x9, w17, w18, x20);
+  __ Smaddl(x10, w18, w18, x20);
+  __ Smaddl(x11, w19, w19, x20);
+  __ Smaddl(x12, w19, w19, x21);
+  __ Umaddl(x13, w17, w18, x20);
+  __ Umaddl(x14, w18, w18, x20);
+  __ Umaddl(x15, w19, w19, x20);
+  __ Umaddl(x22, w19, w19, x21);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(3, x9);
+  ASSERT_EQUAL_64(5, x10);
+  ASSERT_EQUAL_64(5, x11);
+  ASSERT_EQUAL_64(0x200000001UL, x12);
+  ASSERT_EQUAL_64(0x100000003UL, x13);
+  ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
+  ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
+  ASSERT_EQUAL_64(0x1, x22);
+
+  TEARDOWN();
+}
+
+
+// SMSUBL and UMSUBL (signed/unsigned multiply-subtract long:
+// xd = xa - wn * wm, with the 32-bit product sign- or zero-extended),
+// mirroring the operand matrix of TEST(smaddl_umaddl).
+TEST(smsubl_umsubl) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x17, 1);
+  __ Mov(x18, 0xffffffff);
+  __ Mov(x19, 0xffffffffffffffffUL);
+  __ Mov(x20, 4);
+  __ Mov(x21, 0x200000000UL);
+
+  __ Smsubl(x9, w17, w18, x20);
+  __ Smsubl(x10, w18, w18, x20);
+  __ Smsubl(x11, w19, w19, x20);
+  __ Smsubl(x12, w19, w19, x21);
+  __ Umsubl(x13, w17, w18, x20);
+  __ Umsubl(x14, w18, w18, x20);
+  __ Umsubl(x15, w19, w19, x20);
+  __ Umsubl(x22, w19, w19, x21);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(5, x9);
+  ASSERT_EQUAL_64(3, x10);
+  ASSERT_EQUAL_64(3, x11);
+  ASSERT_EQUAL_64(0x1ffffffffUL, x12);
+  ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
+  ASSERT_EQUAL_64(0x200000003UL, x14);
+  ASSERT_EQUAL_64(0x200000003UL, x15);
+  ASSERT_EQUAL_64(0x3ffffffffUL, x22);
+
+  TEARDOWN();
+}
+
+
+// UDIV and SDIV in 32- and 64-bit widths: ordinary quotients, signed
+// negative results, the INT_MIN overflow cases, and division by zero (which
+// the asserts expect to yield 0 rather than trap).
+TEST(div) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 1);
+  __ Mov(x17, 0xffffffff);
+  __ Mov(x18, 0xffffffffffffffffUL);
+  __ Mov(x19, 0x80000000);
+  __ Mov(x20, 0x8000000000000000UL);
+  __ Mov(x21, 2);
+
+  __ Udiv(w0, w16, w16);
+  __ Udiv(w1, w17, w16);
+  __ Sdiv(w2, w16, w16);
+  __ Sdiv(w3, w16, w17);
+  __ Sdiv(w4, w17, w18);
+
+  __ Udiv(x5, x16, x16);
+  __ Udiv(x6, x17, x18);
+  __ Sdiv(x7, x16, x16);
+  __ Sdiv(x8, x16, x17);
+  __ Sdiv(x9, x17, x18);
+
+  __ Udiv(w10, w19, w21);
+  __ Sdiv(w11, w19, w21);
+  __ Udiv(x12, x19, x21);
+  __ Sdiv(x13, x19, x21);
+  __ Udiv(x14, x20, x21);
+  __ Sdiv(x15, x20, x21);
+
+  __ Udiv(w22, w19, w17);
+  __ Sdiv(w23, w19, w17);
+  __ Udiv(x24, x20, x18);
+  __ Sdiv(x25, x20, x18);
+
+  __ Udiv(x26, x16, x21);
+  __ Sdiv(x27, x16, x21);
+  __ Udiv(x28, x18, x21);
+  __ Sdiv(x29, x18, x21);
+
+  // Division by zero: the asserts below expect a 0 quotient, no trap.
+  __ Mov(x17, 0);
+  __ Udiv(w18, w16, w17);
+  __ Sdiv(w19, w16, w17);
+  __ Udiv(x20, x16, x17);
+  __ Sdiv(x21, x16, x17);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(0xffffffff, x1);
+  ASSERT_EQUAL_64(1, x2);
+  ASSERT_EQUAL_64(0xffffffff, x3);
+  ASSERT_EQUAL_64(1, x4);
+  ASSERT_EQUAL_64(1, x5);
+  ASSERT_EQUAL_64(0, x6);
+  ASSERT_EQUAL_64(1, x7);
+  ASSERT_EQUAL_64(0, x8);
+  ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
+  ASSERT_EQUAL_64(0x40000000, x10);
+  ASSERT_EQUAL_64(0xC0000000, x11);
+  ASSERT_EQUAL_64(0x40000000, x12);
+  ASSERT_EQUAL_64(0x40000000, x13);
+  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+  ASSERT_EQUAL_64(0xC000000000000000UL, x15);
+  ASSERT_EQUAL_64(0, x22);
+  ASSERT_EQUAL_64(0x80000000, x23);
+  ASSERT_EQUAL_64(0, x24);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x25);
+  ASSERT_EQUAL_64(0, x26);
+  ASSERT_EQUAL_64(0, x27);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
+  ASSERT_EQUAL_64(0, x29);
+  ASSERT_EQUAL_64(0, x18);
+  ASSERT_EQUAL_64(0, x19);
+  ASSERT_EQUAL_64(0, x20);
+  ASSERT_EQUAL_64(0, x21);
+
+  TEARDOWN();
+}
+
+
+// Bit/byte reversal instructions on the pattern 0xfedcba9876543210:
+// RBIT (reverse bits), REV16 (bytes within halfwords), REV (full-width byte
+// reverse) and REV32 (bytes within 32-bit words).
+TEST(rbit_rev) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x24, 0xfedcba9876543210UL);
+  __ Rbit(w0, w24);
+  __ Rbit(x1, x24);
+  __ Rev16(w2, w24);
+  __ Rev16(x3, x24);
+  __ Rev(w4, w24);
+  __ Rev32(x5, x24);
+  __ Rev(x6, x24);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x084c2a6e, x0);
+  ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+  ASSERT_EQUAL_64(0x54761032, x2);
+  ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
+  ASSERT_EQUAL_64(0x10325476, x4);
+  ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
+  ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
+
+  TEARDOWN();
+}
+
+
+// CLZ (count leading zeros) and CLS (count leading sign bits, excluding the
+// sign bit itself) on positive, negative and zero inputs, in 32- and 64-bit
+// widths; zero gives CLZ == width and CLS == width - 1.
+TEST(clz_cls) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x24, 0x0008000000800000UL);
+  __ Mov(x25, 0xff800000fff80000UL);
+  __ Mov(x26, 0);
+  __ Clz(w0, w24);
+  __ Clz(x1, x24);
+  __ Clz(w2, w25);
+  __ Clz(x3, x25);
+  __ Clz(w4, w26);
+  __ Clz(x5, x26);
+  __ Cls(w6, w24);
+  __ Cls(x7, x24);
+  __ Cls(w8, w25);
+  __ Cls(x9, x25);
+  __ Cls(w10, w26);
+  __ Cls(x11, x26);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(8, x0);
+  ASSERT_EQUAL_64(12, x1);
+  ASSERT_EQUAL_64(0, x2);
+  ASSERT_EQUAL_64(0, x3);
+  ASSERT_EQUAL_64(32, x4);
+  ASSERT_EQUAL_64(64, x5);
+  ASSERT_EQUAL_64(7, x6);
+  ASSERT_EQUAL_64(11, x7);
+  ASSERT_EQUAL_64(12, x8);
+  ASSERT_EQUAL_64(8, x9);
+  ASSERT_EQUAL_64(31, x10);
+  ASSERT_EQUAL_64(63, x11);
+
+  TEARDOWN();
+}
+
+
+// Label binding and branching: multiple branches to one label, forward and
+// backward branches, and Bl to a label placed after the first END() so the
+// call returns into the epilogue. x0 stays 1 only if no stray path executes
+// a Mov(x0, 0); x1 becomes 1 only if the Bl target runs.
+TEST(label) {
+  INIT_V8();
+  SETUP();
+
+  Label label_1, label_2, label_3, label_4;
+
+  START();
+  __ Mov(x0, 0x1);
+  __ Mov(x1, 0x0);
+  __ Mov(x22, lr);  // Save lr.
+
+  __ B(&label_1);
+  __ B(&label_1);
+  __ B(&label_1);  // Multiple branches to the same label.
+  __ Mov(x0, 0x0);
+  __ Bind(&label_2);
+  __ B(&label_3);  // Forward branch.
+  __ Mov(x0, 0x0);
+  __ Bind(&label_1);
+  __ B(&label_2);  // Backward branch.
+  __ Mov(x0, 0x0);
+  __ Bind(&label_3);
+  __ Bl(&label_4);
+  END();
+
+  // label_4 is bound after END() so the Bl lands in code that restores lr
+  // before the epilogue runs.
+  __ Bind(&label_4);
+  __ Mov(x1, 0x1);
+  __ Mov(lr, x22);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1, x0);
+  ASSERT_EQUAL_64(0x1, x1);
+
+  TEARDOWN();
+}
+
+
+// Test that branches can exist at the start of the buffer. (This is a
+// boundary condition in the label-handling code.) The first instruction in
+// the buffer jumps over the failing path (which would leave x0 == 0) to the
+// passing path (x0 == 1).
+TEST(branch_at_start) {
+  INIT_V8();
+  SETUP();
+
+  Label good, exit;
+
+  // Test that branches can exist at the start of the buffer. (This is a
+  // boundary condition in the label-handling code.) To achieve this, we have
+  // to work around the code generated by START.
+  RESET();
+  __ B(&good);
+
+  START_AFTER_RESET();
+  __ Mov(x0, 0x0);
+  END();
+
+  __ Bind(&exit);
+  START_AFTER_RESET();
+  __ Mov(x0, 0x1);
+  END();
+
+  __ Bind(&good);
+  __ B(&exit);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1, x0);
+  TEARDOWN();
+}
+
+
+// ADR (PC-relative address computation): forward, backward and self
+// references, including multiple references to one label. x0 accumulates
+// differences between registers that must hold identical addresses, and x1
+// xors the self-reference against the bound address — both must end at 0.
+TEST(adr) {
+  INIT_V8();
+  SETUP();
+
+  Label label_1, label_2, label_3, label_4;
+
+  START();
+  __ Mov(x0, 0x0);        // Set to non-zero to indicate failure.
+  __ Adr(x1, &label_3);   // Set to zero to indicate success.
+
+  __ Adr(x2, &label_1);   // Multiple forward references to the same label.
+  __ Adr(x3, &label_1);
+  __ Adr(x4, &label_1);
+
+  __ Bind(&label_2);
+  __ Eor(x5, x2, Operand(x3));  // Ensure that x2,x3 and x4 are identical.
+  __ Eor(x6, x2, Operand(x4));
+  __ Orr(x0, x0, Operand(x5));
+  __ Orr(x0, x0, Operand(x6));
+  __ Br(x2);  // label_1, label_3
+
+  __ Bind(&label_3);
+  __ Adr(x2, &label_3);   // Self-reference (offset 0).
+  __ Eor(x1, x1, Operand(x2));
+  __ Adr(x2, &label_4);   // Simple forward reference.
+  __ Br(x2);  // label_4
+
+  __ Bind(&label_1);
+  __ Adr(x2, &label_3);   // Multiple reverse references to the same label.
+  __ Adr(x3, &label_3);
+  __ Adr(x4, &label_3);
+  __ Adr(x5, &label_2);   // Simple reverse reference.
+  __ Br(x5);  // label_2
+
+  __ Bind(&label_4);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x0, x0);
+  ASSERT_EQUAL_64(0x0, x1);
+
+  TEARDOWN();
+}
+
+
+// Conditional branches: after each Cmp, every condition that must NOT be
+// taken jumps to 'wrong' (which clears x0), and one condition that MUST be
+// taken jumps over a Mov(x0, 0). Also exercises the al and nv conditions via
+// the raw assembler b() — presumably because the B macro rejects them;
+// TODO(review): confirm against the MacroAssembler.
+TEST(branch_cond) {
+  INIT_V8();
+  SETUP();
+
+  Label wrong;
+
+  START();
+  __ Mov(x0, 0x1);
+  __ Mov(x1, 0x1);
+  __ Mov(x2, 0x8000000000000000L);
+
+  // For each 'cmp' instruction below, condition codes other than the ones
+  // following it would branch.
+
+  __ Cmp(x1, 0);
+  __ B(&wrong, eq);
+  __ B(&wrong, lo);
+  __ B(&wrong, mi);
+  __ B(&wrong, vs);
+  __ B(&wrong, ls);
+  __ B(&wrong, lt);
+  __ B(&wrong, le);
+  Label ok_1;
+  __ B(&ok_1, ne);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_1);
+
+  __ Cmp(x1, 1);
+  __ B(&wrong, ne);
+  __ B(&wrong, lo);
+  __ B(&wrong, mi);
+  __ B(&wrong, vs);
+  __ B(&wrong, hi);
+  __ B(&wrong, lt);
+  __ B(&wrong, gt);
+  Label ok_2;
+  __ B(&ok_2, pl);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_2);
+
+  __ Cmp(x1, 2);
+  __ B(&wrong, eq);
+  __ B(&wrong, hs);
+  __ B(&wrong, pl);
+  __ B(&wrong, vs);
+  __ B(&wrong, hi);
+  __ B(&wrong, ge);
+  __ B(&wrong, gt);
+  Label ok_3;
+  __ B(&ok_3, vc);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_3);
+
+  __ Cmp(x2, 1);
+  __ B(&wrong, eq);
+  __ B(&wrong, lo);
+  __ B(&wrong, mi);
+  __ B(&wrong, vc);
+  __ B(&wrong, ls);
+  __ B(&wrong, ge);
+  __ B(&wrong, gt);
+  Label ok_4;
+  __ B(&ok_4, le);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_4);
+
+  Label ok_5;
+  __ b(&ok_5, al);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_5);
+
+  Label ok_6;
+  __ b(&ok_6, nv);
+  __ Mov(x0, 0x0);
+  __ Bind(&ok_6);
+
+  END();
+
+  __ Bind(&wrong);
+  __ Mov(x0, 0x0);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1, x0);
+
+  TEARDOWN();
+}
+
+
+// Register-indirect branches: Br back through the saved lr (x1 == 42 proves
+// fn1 ran), and Blr which additionally updates lr (x2 == 84 proves fn2 ran,
+// and x0 must equal the final lr value plus one instruction).
+TEST(branch_to_reg) {
+  INIT_V8();
+  SETUP();
+
+  // Test br.
+  Label fn1, after_fn1;
+
+  START();
+  __ Mov(x29, lr);  // Preserve lr across the calls below.
+
+  __ Mov(x1, 0);
+  __ B(&after_fn1);
+
+  __ Bind(&fn1);
+  __ Mov(x0, lr);
+  __ Mov(x1, 42);
+  __ Br(x0);
+
+  __ Bind(&after_fn1);
+  __ Bl(&fn1);
+
+  // Test blr.
+  Label fn2, after_fn2;
+
+  __ Mov(x2, 0);
+  __ B(&after_fn2);
+
+  __ Bind(&fn2);
+  __ Mov(x0, lr);
+  __ Mov(x2, 84);
+  __ Blr(x0);
+
+  __ Bind(&after_fn2);
+  __ Bl(&fn2);
+  __ Mov(x3, lr);
+
+  __ Mov(lr, x29);
+  END();
+
+  RUN();
+
+  // The Blr in fn2 set lr to the instruction after itself; x0 captured the
+  // lr from the Bl, which is one instruction before that.
+  ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+  ASSERT_EQUAL_64(42, x1);
+  ASSERT_EQUAL_64(84, x2);
+
+  TEARDOWN();
+}
+
+
+// CBZ and CBNZ: taken and not-taken cases for both instructions in 32- and
+// 64-bit widths. Each sub-test sets its flag register to 1 only if the
+// branch is taken. x18 has all high bits set but a zero low word, so the
+// w-register forms must see zero.
+TEST(compare_branch) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0);
+  __ Mov(x2, 0);
+  __ Mov(x3, 0);
+  __ Mov(x4, 0);
+  __ Mov(x5, 0);
+  __ Mov(x16, 0);
+  __ Mov(x17, 42);
+
+  Label zt, zt_end;
+  __ Cbz(w16, &zt);
+  __ B(&zt_end);
+  __ Bind(&zt);
+  __ Mov(x0, 1);
+  __ Bind(&zt_end);
+
+  Label zf, zf_end;
+  __ Cbz(x17, &zf);
+  __ B(&zf_end);
+  __ Bind(&zf);
+  __ Mov(x1, 1);
+  __ Bind(&zf_end);
+
+  Label nzt, nzt_end;
+  __ Cbnz(w17, &nzt);
+  __ B(&nzt_end);
+  __ Bind(&nzt);
+  __ Mov(x2, 1);
+  __ Bind(&nzt_end);
+
+  Label nzf, nzf_end;
+  __ Cbnz(x16, &nzf);
+  __ B(&nzf_end);
+  __ Bind(&nzf);
+  __ Mov(x3, 1);
+  __ Bind(&nzf_end);
+
+  // Only the low 32 bits are considered by the w-register forms below.
+  __ Mov(x18, 0xffffffff00000000UL);
+
+  Label a, a_end;
+  __ Cbz(w18, &a);
+  __ B(&a_end);
+  __ Bind(&a);
+  __ Mov(x4, 1);
+  __ Bind(&a_end);
+
+  Label b, b_end;
+  __ Cbnz(w18, &b);
+  __ B(&b_end);
+  __ Bind(&b);
+  __ Mov(x5, 1);
+  __ Bind(&b_end);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(0, x1);
+  ASSERT_EQUAL_64(1, x2);
+  ASSERT_EQUAL_64(0, x3);
+  ASSERT_EQUAL_64(1, x4);
+  ASSERT_EQUAL_64(0, x5);
+
+  TEARDOWN();
+}
+
+
+// TBZ and TBNZ (test a single bit and branch): x16 = 0xaa.. has even bits
+// clear and odd bits set, so each sub-test probes a known bit value in both
+// taken and not-taken directions.
+TEST(test_branch) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0);
+  __ Mov(x2, 0);
+  __ Mov(x3, 0);
+  __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
+
+  Label bz, bz_end;
+  __ Tbz(w16, 0, &bz);
+  __ B(&bz_end);
+  __ Bind(&bz);
+  __ Mov(x0, 1);
+  __ Bind(&bz_end);
+
+  Label bo, bo_end;
+  __ Tbz(x16, 63, &bo);
+  __ B(&bo_end);
+  __ Bind(&bo);
+  __ Mov(x1, 1);
+  __ Bind(&bo_end);
+
+  Label nbz, nbz_end;
+  __ Tbnz(x16, 61, &nbz);
+  __ B(&nbz_end);
+  __ Bind(&nbz);
+  __ Mov(x2, 1);
+  __ Bind(&nbz_end);
+
+  Label nbo, nbo_end;
+  __ Tbnz(w16, 2, &nbo);
+  __ B(&nbo_end);
+  __ Bind(&nbo);
+  __ Mov(x3, 1);
+  __ Bind(&nbo_end);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(0, x1);
+  ASSERT_EQUAL_64(1, x2);
+  ASSERT_EQUAL_64(0, x3);
+
+  TEARDOWN();
+}
+
+
+// Backward branches whose targets lie beyond the immediate range of
+// Tbz/Cbz/B(cond): the MacroAssembler must rewrite each as an inverted
+// short branch around an unconditional one. x0 collects one bit per branch
+// that lands correctly; x1 stays 1 only if the fail paths never run.
+TEST(far_branch_backward) {
+  INIT_V8();
+
+  // Test that the MacroAssembler correctly resolves backward branches to labels
+  // that are outside the immediate range of branch instructions.
+  int max_range =
+      std::max(Instruction::ImmBranchRange(TestBranchType),
+               std::max(Instruction::ImmBranchRange(CompareBranchType),
+                        Instruction::ImmBranchRange(CondBranchType)));
+
+  SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+  START();
+
+  Label done, fail;
+  Label test_tbz, test_cbz, test_bcond;
+  Label success_tbz, success_cbz, success_bcond;
+
+  __ Mov(x0, 0);
+  __ Mov(x1, 1);
+  __ Mov(x10, 0);
+
+  __ B(&test_tbz);
+  __ Bind(&success_tbz);
+  __ Orr(x0, x0, 1 << 0);
+  __ B(&test_cbz);
+  __ Bind(&success_cbz);
+  __ Orr(x0, x0, 1 << 1);
+  __ B(&test_bcond);
+  __ Bind(&success_bcond);
+  __ Orr(x0, x0, 1 << 2);
+
+  __ B(&done);
+
+  // Generate enough code to overflow the immediate range of the three types of
+  // branches below.
+  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+    if (i % 100 == 0) {
+      // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of test (especially if tracing is activated).
+      __ B(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+  __ B(&fail);
+
+  __ Bind(&test_tbz);
+  __ Tbz(x10, 7, &success_tbz);
+  __ Bind(&test_cbz);
+  __ Cbz(x10, &success_cbz);
+  __ Bind(&test_bcond);
+  __ Cmp(x10, 0);
+  __ B(eq, &success_bcond);
+
+  // For each out-of-range branch instructions, at least two instructions should
+  // have been generated.
+  CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+
+  __ Bind(&fail);
+  __ Mov(x1, 0);
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x7, x0);
+  ASSERT_EQUAL_64(0x1, x1);
+
+  TEARDOWN();
+}
+
+
+// Forward branches whose targets end up beyond the branch instructions'
+// immediate range: the MacroAssembler must emit veneers. Same success/fail
+// bookkeeping as far_branch_backward (x0 accumulates one bit per branch).
+TEST(far_branch_simple_veneer) {
+  INIT_V8();
+
+  // Test that the MacroAssembler correctly emits veneers for forward branches
+  // to labels that are outside the immediate range of branch instructions.
+  int max_range =
+      std::max(Instruction::ImmBranchRange(TestBranchType),
+               std::max(Instruction::ImmBranchRange(CompareBranchType),
+                        Instruction::ImmBranchRange(CondBranchType)));
+
+  SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+  START();
+
+  Label done, fail;
+  Label test_tbz, test_cbz, test_bcond;
+  Label success_tbz, success_cbz, success_bcond;
+
+  __ Mov(x0, 0);
+  __ Mov(x1, 1);
+  __ Mov(x10, 0);
+
+  __ Bind(&test_tbz);
+  __ Tbz(x10, 7, &success_tbz);
+  __ Bind(&test_cbz);
+  __ Cbz(x10, &success_cbz);
+  __ Bind(&test_bcond);
+  __ Cmp(x10, 0);
+  __ B(eq, &success_bcond);
+
+  // Generate enough code to overflow the immediate range of the three types of
+  // branches below.
+  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+    if (i % 100 == 0) {
+      // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of test (especially if tracing is activated).
+      // Also, the branches give the MacroAssembler the opportunity to emit the
+      // veneers.
+      __ B(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+  __ B(&fail);
+
+  __ Bind(&success_tbz);
+  __ Orr(x0, x0, 1 << 0);
+  __ B(&test_cbz);
+  __ Bind(&success_cbz);
+  __ Orr(x0, x0, 1 << 1);
+  __ B(&test_bcond);
+  __ Bind(&success_bcond);
+  __ Orr(x0, x0, 1 << 2);
+
+  __ B(&done);
+  __ Bind(&fail);
+  __ Mov(x1, 0);
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x7, x0);
+  ASSERT_EQUAL_64(0x1, x1);
+
+  TEARDOWN();
+}
+
+
+// Veneer emission when the out-of-range branch sits at different positions
+// in a label's link chain (the chain of unresolved branches to one label).
+TEST(far_branch_veneer_link_chain) {
+  INIT_V8();
+
+  // Test that the MacroAssembler correctly emits veneers for forward branches
+  // that target out-of-range labels and are part of multiple instructions
+  // jumping to that label.
+  //
+  // We test the three situations with the different types of instruction:
+  // (1)- When the branch is at the start of the chain with tbz.
+  // (2)- When the branch is in the middle of the chain with cbz.
+  // (3)- When the branch is at the end of the chain with bcond.
+  int max_range =
+      std::max(Instruction::ImmBranchRange(TestBranchType),
+               std::max(Instruction::ImmBranchRange(CompareBranchType),
+                        Instruction::ImmBranchRange(CondBranchType)));
+
+  SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+  START();
+
+  Label skip, fail, done;
+  Label test_tbz, test_cbz, test_bcond;
+  Label success_tbz, success_cbz, success_bcond;
+
+  __ Mov(x0, 0);
+  __ Mov(x1, 1);
+  __ Mov(x10, 0);
+
+  __ B(&skip);
+  // Branches at the start of the chain for situations (2) and (3).
+  __ B(&success_cbz);
+  __ B(&success_bcond);
+  __ Nop();
+  __ B(&success_bcond);
+  __ B(&success_cbz);
+  __ Bind(&skip);
+
+  __ Bind(&test_tbz);
+  __ Tbz(x10, 7, &success_tbz);
+  __ Bind(&test_cbz);
+  __ Cbz(x10, &success_cbz);
+  __ Bind(&test_bcond);
+  __ Cmp(x10, 0);
+  __ B(eq, &success_bcond);
+
+  // Reuse 'skip' for a second jump-over block of chain entries.
+  skip.Unuse();
+  __ B(&skip);
+  // Branches at the end of the chain for situations (1) and (2).
+  __ B(&success_cbz);
+  __ B(&success_tbz);
+  __ Nop();
+  __ B(&success_tbz);
+  __ B(&success_cbz);
+  __ Bind(&skip);
+
+  // Generate enough code to overflow the immediate range of the three types of
+  // branches below.
+  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+    if (i % 100 == 0) {
+      // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of test (especially if tracing is activated).
+      // Also, the branches give the MacroAssembler the opportunity to emit the
+      // veneers.
+      __ B(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+  __ B(&fail);
+
+  __ Bind(&success_tbz);
+  __ Orr(x0, x0, 1 << 0);
+  __ B(&test_cbz);
+  __ Bind(&success_cbz);
+  __ Orr(x0, x0, 1 << 1);
+  __ B(&test_bcond);
+  __ Bind(&success_bcond);
+  __ Orr(x0, x0, 1 << 2);
+
+  __ B(&done);
+  __ Bind(&fail);
+  __ Mov(x1, 0);
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x7, x0);
+  ASSERT_EQUAL_64(0x1, x1);
+
+  TEARDOWN();
+}
+
+
+// Veneer handling when a branch is removed from a label's link chain and the
+// neighbouring links are too far apart to be re-linked directly. The code
+// ping-pongs between far_target and the test sites, and x0 records which
+// site branched last (must end at 3).
+TEST(far_branch_veneer_broken_link_chain) {
+  INIT_V8();
+
+  // Check that the MacroAssembler correctly handles the situation when removing
+  // a branch from the link chain of a label and the two links on each side of
+  // the removed branch cannot be linked together (out of range).
+  //
+  // We test with tbz because it has a small range.
+  int max_range = Instruction::ImmBranchRange(TestBranchType);
+  int inter_range = max_range / 2 + max_range / 10;
+
+  SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
+
+  START();
+
+  Label skip, fail, done;
+  Label test_1, test_2, test_3;
+  Label far_target;
+
+  __ Mov(x0, 0);  // Indicates the origin of the branch.
+  __ Mov(x1, 1);
+  __ Mov(x10, 0);
+
+  // First instruction in the label chain.
+  __ Bind(&test_1);
+  __ Mov(x0, 1);
+  __ B(&far_target);
+
+  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+    if (i % 100 == 0) {
+      // Do not allow generating veneers. They should not be needed.
+      __ b(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+
+  // Will need a veneer to point to reach the target.
+  __ Bind(&test_2);
+  __ Mov(x0, 2);
+  __ Tbz(x10, 7, &far_target);
+
+  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+    if (i % 100 == 0) {
+      // Do not allow generating veneers. They should not be needed.
+      __ b(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+
+  // Does not need a veneer to reach the target, but the initial branch
+  // instruction is out of range.
+  __ Bind(&test_3);
+  __ Mov(x0, 3);
+  __ Tbz(x10, 7, &far_target);
+
+  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+    if (i % 100 == 0) {
+      // Allow generating veneers.
+      __ B(&fail);
+    } else {
+      __ Nop();
+    }
+  }
+
+  __ B(&fail);
+
+  __ Bind(&far_target);
+  __ Cmp(x0, 1);
+  __ B(eq, &test_2);
+  __ Cmp(x0, 2);
+  __ B(eq, &test_3);
+
+  __ B(&done);
+  __ Bind(&fail);
+  __ Mov(x1, 0);
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x3, x0);
+  ASSERT_EQUAL_64(0x1, x1);
+
+  TEARDOWN();
+}
+
+
+// The BranchType-taking B() overloads: condition codes plus the pseudo
+// branch types (never/always, reg_zero/reg_not_zero, reg_bit_clear/
+// reg_bit_set). Not-taken cases target 'fail' (which sets x0 to 1); taken
+// cases must chain through l1..l5. x0 must remain 0.
+TEST(branch_type) {
+  INIT_V8();
+
+  SETUP();
+
+  Label fail, done;
+
+  START();
+  __ Mov(x0, 0x0);
+  __ Mov(x10, 0x7);
+  __ Mov(x11, 0x0);
+
+  // Test non taken branches.
+  __ Cmp(x10, 0x7);
+  __ B(&fail, ne);
+  __ B(&fail, never);
+  __ B(&fail, reg_zero, x10);
+  __ B(&fail, reg_not_zero, x11);
+  __ B(&fail, reg_bit_clear, x10, 0);
+  __ B(&fail, reg_bit_set, x10, 3);
+
+  // Test taken branches.
+  Label l1, l2, l3, l4, l5;
+  __ Cmp(x10, 0x7);
+  __ B(&l1, eq);
+  __ B(&fail);
+  __ Bind(&l1);
+  __ B(&l2, always);
+  __ B(&fail);
+  __ Bind(&l2);
+  __ B(&l3, reg_not_zero, x10);
+  __ B(&fail);
+  __ Bind(&l3);
+  __ B(&l4, reg_bit_clear, x10, 15);
+  __ B(&fail);
+  __ Bind(&l4);
+  __ B(&l5, reg_bit_set, x10, 1);
+  __ B(&fail);
+  __ Bind(&l5);
+
+  __ B(&done);
+
+  __ Bind(&fail);
+  __ Mov(x0, 0x1);
+
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x0, x0);
+
+  TEARDOWN();
+}
+
+
+// Loads and stores with an immediate offset addressing mode: word, doubleword,
+// byte and halfword accesses between stack-allocated src/dst buffers. Base
+// registers must be left unmodified (no writeback in this mode).
+TEST(ldr_str_offset) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Ldr(w0, MemOperand(x17));
+  __ Str(w0, MemOperand(x18));
+  __ Ldr(w1, MemOperand(x17, 4));
+  __ Str(w1, MemOperand(x18, 12));
+  __ Ldr(x2, MemOperand(x17, 8));
+  __ Str(x2, MemOperand(x18, 16));
+  __ Ldrb(w3, MemOperand(x17, 1));
+  __ Strb(w3, MemOperand(x18, 25));
+  __ Ldrh(w4, MemOperand(x17, 2));
+  __ Strh(w4, MemOperand(x18, 33));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x76543210, x0);
+  ASSERT_EQUAL_64(0x76543210, dst[0]);
+  ASSERT_EQUAL_64(0xfedcba98, x1);
+  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  ASSERT_EQUAL_64(0x32, x3);
+  ASSERT_EQUAL_64(0x3200, dst[3]);
+  ASSERT_EQUAL_64(0x7654, x4);
+  ASSERT_EQUAL_64(0x765400, dst[4]);
+  // No writeback: base registers keep their original values.
+  ASSERT_EQUAL_64(src_base, x17);
+  ASSERT_EQUAL_64(dst_base, x18);
+
+  TEARDOWN();
+}
+
+
+// Ldr/Str with large offsets (up to 8191 words) in offset, post-index and
+// pre-index modes, using sentinel values planted at src[0], src[6144] and
+// src[8191] to confirm the right element was accessed.
+TEST(ldr_str_wide) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[8192];
+ uint32_t dst[8192];
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+ memset(src, 0xaa, 8192 * sizeof(src[0]));
+ memset(dst, 0xaa, 8192 * sizeof(dst[0]));
+ // Sentinels; everything else stays 0xaaaaaaaa.
+ src[0] = 0;
+ src[6144] = 6144;
+ src[8191] = 8191;
+
+ START();
+ __ Mov(x22, src_base);
+ __ Mov(x23, dst_base);
+ __ Mov(x24, src_base);
+ __ Mov(x25, dst_base);
+ __ Mov(x26, src_base);
+ __ Mov(x27, dst_base);
+
+ __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
+ __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
+ __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
+ __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
+ __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
+ __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(8191, w0);
+ ASSERT_EQUAL_32(8191, dst[8191]);
+ ASSERT_EQUAL_64(src_base, x22);
+ ASSERT_EQUAL_64(dst_base, x23);
+ ASSERT_EQUAL_32(0, w1);
+ ASSERT_EQUAL_32(0, dst[0]);
+ ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
+ ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
+ ASSERT_EQUAL_32(6144, w2);
+ ASSERT_EQUAL_32(6144, dst[6144]);
+ ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
+ ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
+
+ TEARDOWN();
+}
+
+
+// Ldr/Str (and byte/half-word forms) with pre-index addressing: the base
+// register is updated *before* the access, so the final base values checked
+// at the bottom equal base + offset.
+TEST(ldr_str_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base + 16);
+ __ Mov(x22, dst_base + 40);
+ __ Mov(x23, src_base);
+ __ Mov(x24, dst_base);
+ __ Mov(x25, src_base);
+ __ Mov(x26, dst_base);
+ __ Ldr(w0, MemOperand(x17, 4, PreIndex));
+ __ Str(w0, MemOperand(x18, 12, PreIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PreIndex));
+ __ Str(x1, MemOperand(x20, 16, PreIndex));
+ __ Ldr(w2, MemOperand(x21, -4, PreIndex));
+ __ Str(w2, MemOperand(x22, -4, PreIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
+ __ Strb(w3, MemOperand(x24, 25, PreIndex));
+ __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
+ __ Strh(w4, MemOperand(x26, 41, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x01234567, x2);
+ ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ // Pre-index writes back base + offset.
+ ASSERT_EQUAL_64(src_base + 4, x17);
+ ASSERT_EQUAL_64(dst_base + 12, x18);
+ ASSERT_EQUAL_64(src_base + 8, x19);
+ ASSERT_EQUAL_64(dst_base + 16, x20);
+ ASSERT_EQUAL_64(src_base + 12, x21);
+ ASSERT_EQUAL_64(dst_base + 36, x22);
+ ASSERT_EQUAL_64(src_base + 1, x23);
+ ASSERT_EQUAL_64(dst_base + 25, x24);
+ ASSERT_EQUAL_64(src_base + 3, x25);
+ ASSERT_EQUAL_64(dst_base + 41, x26);
+
+ TEARDOWN();
+}
+
+
+// Ldr/Str (and byte/half-word forms) with post-index addressing: the access
+// uses the original base, then the base register is advanced by the offset.
+TEST(ldr_str_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ // Bases start pre-offset; post-index moves them afterwards.
+ __ Mov(x17, src_base + 4);
+ __ Mov(x18, dst_base + 12);
+ __ Mov(x19, src_base + 8);
+ __ Mov(x20, dst_base + 16);
+ __ Mov(x21, src_base + 8);
+ __ Mov(x22, dst_base + 32);
+ __ Mov(x23, src_base + 1);
+ __ Mov(x24, dst_base + 25);
+ __ Mov(x25, src_base + 3);
+ __ Mov(x26, dst_base + 41);
+ __ Ldr(w0, MemOperand(x17, 4, PostIndex));
+ __ Str(w0, MemOperand(x18, 12, PostIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PostIndex));
+ __ Str(x1, MemOperand(x20, 16, PostIndex));
+ __ Ldr(x2, MemOperand(x21, -8, PostIndex));
+ __ Str(x2, MemOperand(x22, -32, PostIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
+ __ Strb(w3, MemOperand(x24, 5, PostIndex));
+ __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
+ __ Strh(w4, MemOperand(x26, -41, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ // Post-index writes back base + offset.
+ ASSERT_EQUAL_64(src_base + 8, x17);
+ ASSERT_EQUAL_64(dst_base + 24, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+ ASSERT_EQUAL_64(src_base, x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+ ASSERT_EQUAL_64(src_base + 2, x23);
+ ASSERT_EQUAL_64(dst_base + 30, x24);
+ ASSERT_EQUAL_64(src_base, x25);
+ ASSERT_EQUAL_64(dst_base, x26);
+
+ TEARDOWN();
+}
+
+
+// Sign-extending loads: Ldrsb/Ldrsh into W and X destinations, and Ldrsw
+// into X. src[0] holds negative byte/half/word patterns, src[1] positive
+// ones, so both sign-extension directions are covered.
+TEST(load_signed) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80008080, 0x7fff7f7f};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
+ __ Ldrsb(w0, MemOperand(x24));
+ __ Ldrsb(w1, MemOperand(x24, 4));
+ __ Ldrsh(w2, MemOperand(x24));
+ __ Ldrsh(w3, MemOperand(x24, 4));
+ __ Ldrsb(x4, MemOperand(x24));
+ __ Ldrsb(x5, MemOperand(x24, 4));
+ __ Ldrsh(x6, MemOperand(x24));
+ __ Ldrsh(x7, MemOperand(x24, 4));
+ __ Ldrsw(x8, MemOperand(x24));
+ __ Ldrsw(x9, MemOperand(x24, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff80, x0);
+ ASSERT_EQUAL_64(0x0000007f, x1);
+ ASSERT_EQUAL_64(0xffff8080, x2);
+ ASSERT_EQUAL_64(0x00007f7f, x3);
+ ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
+ ASSERT_EQUAL_64(0x000000000000007fUL, x5);
+ ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
+ ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
+ ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
+ ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
+
+ TEARDOWN();
+}
+
+
+// Register-offset addressing: plain X-register offsets, negative offsets,
+// and W-register offsets with SXTW extension, with and without a shift
+// amount (the shift scales the offset by the access size).
+TEST(load_store_regoffset) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[3] = {1, 2, 3};
+ uint32_t dst[4] = {0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 3 * sizeof(src[0]));
+ __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
+ __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
+ __ Mov(x24, 0);
+ __ Mov(x25, 4);
+ __ Mov(x26, -4);
+ __ Mov(x27, 0xfffffffc); // 32-bit -4.
+ __ Mov(x28, 0xfffffffe); // 32-bit -2.
+ __ Mov(x29, 0xffffffff); // 32-bit -1.
+
+ __ Ldr(w0, MemOperand(x16, x24));
+ __ Ldr(x1, MemOperand(x16, x25));
+ __ Ldr(w2, MemOperand(x18, x26));
+ __ Ldr(w3, MemOperand(x18, x27, SXTW));
+ __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+ __ Str(w0, MemOperand(x17, x24));
+ __ Str(x1, MemOperand(x17, x25));
+ __ Str(w2, MemOperand(x20, x29, SXTW, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0x0000000300000002UL, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(3, x3);
+ ASSERT_EQUAL_64(2, x4);
+ ASSERT_EQUAL_32(1, dst[0]);
+ ASSERT_EQUAL_32(2, dst[1]);
+ ASSERT_EQUAL_32(3, dst[2]);
+ ASSERT_EQUAL_32(3, dst[3]);
+
+ TEARDOWN();
+}
+
+
+// Single-precision FP loads and stores through S registers, covering
+// offset, post-index and pre-index addressing modes.
+TEST(load_store_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[3] = {1.0, 2.0, 3.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
+ __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(s2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(2.0, dst[0]);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_FP32(3.0, s2);
+ ASSERT_EQUAL_FP32(3.0, dst[1]);
+ // Only the indexed modes write back to their base registers.
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+// Double-precision FP loads and stores through D registers; mirrors
+// load_store_float with the same addressing-mode coverage.
+TEST(load_store_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[3] = {1.0, 2.0, 3.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
+ __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(d2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(2.0, dst[0]);
+ ASSERT_EQUAL_FP64(1.0, d1);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(3.0, dst[1]);
+ // Only the indexed modes write back to their base registers.
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+// Ldp/Stp on S registers: pair load with post-index, pair store with
+// pre-index. dst[0] must stay untouched by the pre-indexed store.
+TEST(ldp_stp_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[2] = {1.0, 2.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s31);
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(0.0, dst[0]);
+ ASSERT_EQUAL_FP32(2.0, dst[1]);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+// Ldp/Stp on D registers; mirrors ldp_stp_float at double precision.
+TEST(ldp_stp_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[2] = {1.0, 2.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0, d31);
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(0.0, dst[0]);
+ ASSERT_EQUAL_FP64(2.0, dst[1]);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+// Ldp/Stp with immediate-offset addressing, W and X pairs, positive and
+// negative offsets. Base registers must be left unchanged.
+TEST(ldp_stp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
+ __ Ldp(w0, w1, MemOperand(x16));
+ __ Ldp(w2, w3, MemOperand(x16, 4));
+ __ Ldp(x4, x5, MemOperand(x16, 8));
+ __ Ldp(w6, w7, MemOperand(x18, -12));
+ __ Ldp(x8, x9, MemOperand(x18, -16));
+ __ Stp(w0, w1, MemOperand(x17));
+ __ Stp(w2, w3, MemOperand(x17, 8));
+ __ Stp(x4, x5, MemOperand(x17, 16));
+ __ Stp(w6, w7, MemOperand(x19, -24));
+ __ Stp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ // Offset addressing must not write back to the base registers.
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+// Ldnp/Stnp (non-temporal pair): same data and expectations as
+// ldp_stp_offset — the non-temporal hint must not change results.
+TEST(ldnp_stnp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
+ __ Ldnp(w0, w1, MemOperand(x16));
+ __ Ldnp(w2, w3, MemOperand(x16, 4));
+ __ Ldnp(x4, x5, MemOperand(x16, 8));
+ __ Ldnp(w6, w7, MemOperand(x18, -12));
+ __ Ldnp(x8, x9, MemOperand(x18, -16));
+ __ Stnp(w0, w1, MemOperand(x17));
+ __ Stnp(w2, w3, MemOperand(x17, 8));
+ __ Stnp(x4, x5, MemOperand(x17, 16));
+ __ Stnp(w6, w7, MemOperand(x19, -24));
+ __ Stnp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+// Ldp/Stp with pre-index write-back. Intermediate base values are captured
+// into x19-x22 between accesses so write-back can be checked at each step.
+TEST(ldp_stp_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00112233, x0);
+ ASSERT_EQUAL_64(0xccddeeff, x1);
+ ASSERT_EQUAL_64(0x44556677, x2);
+ ASSERT_EQUAL_64(0x00112233, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x6);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+// Ldp/Stp with post-index write-back; same capture-the-base structure as
+// ldp_stp_preindex.
+TEST(ldp_stp_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL, 0x7766554433221100UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+// Ldpsw: load a pair of 32-bit words and sign-extend each to 64 bits.
+// src[0] has the sign bit set, src[1] clear, covering both directions.
+TEST(ldp_sign_extend) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80000000, 0x7fffffff};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
+ __ Ldpsw(x0, x1, MemOperand(x24));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
+ ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
+
+ TEARDOWN();
+}
+
+
+// Unscaled-offset accesses: every offset here (1, 2, 3, 9, -9, -5, -1) is
+// either negative or not a multiple of the access size, so the macro
+// assembler must emit the Ldur/Stur encodings rather than scaled Ldr/Str.
+TEST(ldur_stur) {
+ INIT_V8();
+ SETUP();
+
+ int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
+ int64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base + 16);
+ __ Mov(x20, dst_base + 32);
+ __ Mov(x21, dst_base + 40);
+ __ Ldr(w0, MemOperand(x17, 1));
+ __ Str(w0, MemOperand(x18, 2));
+ __ Ldr(x1, MemOperand(x17, 3));
+ __ Str(x1, MemOperand(x18, 9));
+ __ Ldr(w2, MemOperand(x19, -9));
+ __ Str(w2, MemOperand(x20, -5));
+ __ Ldrb(w3, MemOperand(x19, -1));
+ __ Strb(w3, MemOperand(x21, -1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x6789abcd, x0);
+ ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
+ ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
+ ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
+ ASSERT_EQUAL_64(0x000000ab, dst[2]);
+ ASSERT_EQUAL_64(0xabcdef01, x2);
+ ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
+ ASSERT_EQUAL_64(0x00000001, x3);
+ ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
+ // Unscaled-offset addressing must not write back to the bases.
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+
+ TEARDOWN();
+}
+
+
+// NOTE(review): the whole region below is compiled out. It covers Ldr from
+// the literal pool and literal-pool emission/range behavior, and is pending
+// adaptation of the Literal tests for RelocInfo (see TODOs). Kept for
+// reference until re-enabled.
+#if 0 // TODO(all) enable.
+// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
+TEST(ldr_literal) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Ldr(x2, 0x1234567890abcdefUL);
+ __ Ldr(w3, 0xfedcba09);
+ __ Ldr(d13, 1.234);
+ __ Ldr(s25, 2.5);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
+ ASSERT_EQUAL_64(0xfedcba09, x3);
+ ASSERT_EQUAL_FP64(1.234, d13);
+ ASSERT_EQUAL_FP32(2.5, s25);
+
+ TEARDOWN();
+}
+
+
+// Emit literal loads, pad with Nops up to 'range_', then check whether the
+// literal pool is (or is not) dumped at that distance, per 'expect_dump'.
+static void LdrLiteralRangeHelper(ptrdiff_t range_,
+ LiteralPoolEmitOption option,
+ bool expect_dump) {
+ ASSERT(range_ > 0);
+ SETUP_SIZE(range_ + 1024);
+
+ Label label_1, label_2;
+
+ size_t range = static_cast<size_t>(range_);
+ size_t code_size = 0;
+ size_t pool_guard_size;
+
+ if (option == NoJumpRequired) {
+ // Space for an explicit branch.
+ pool_guard_size = sizeof(Instr);
+ } else {
+ pool_guard_size = 0;
+ }
+
+ START();
+ // Force a pool dump so the pool starts off empty.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ __ Ldr(x0, 0x1234567890abcdefUL);
+ __ Ldr(w1, 0xfedcba09);
+ __ Ldr(d0, 1.234);
+ __ Ldr(s1, 2.5);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ code_size += 4 * sizeof(Instr);
+
+ // Check that the requested range (allowing space for a branch over the pool)
+ // can be handled by this test.
+ ASSERT((code_size + pool_guard_size) <= range);
+
+ // Emit NOPs up to 'range', leaving space for the pool guard.
+ while ((code_size + pool_guard_size) < range) {
+ __ Nop();
+ code_size += sizeof(Instr);
+ }
+
+ // Emit the guard sequence before the literal pool.
+ if (option == NoJumpRequired) {
+ __ B(&label_1);
+ code_size += sizeof(Instr);
+ }
+
+ ASSERT(code_size == range);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ // Possibly generate a literal pool.
+ __ CheckLiteralPool(option);
+ __ Bind(&label_1);
+ if (expect_dump) {
+ ASSERT_LITERAL_POOL_SIZE(0);
+ } else {
+ ASSERT_LITERAL_POOL_SIZE(4);
+ }
+
+ // Force a pool flush to check that a second pool functions correctly.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ // These loads should be after the pool (and will require a new one).
+ __ Ldr(x4, 0x34567890abcdef12UL);
+ __ Ldr(w5, 0xdcba09fe);
+ __ Ldr(d4, 123.4);
+ __ Ldr(s5, 250.0);
+ ASSERT_LITERAL_POOL_SIZE(4);
+ END();
+
+ RUN();
+
+ // Check that the literals loaded correctly.
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
+ ASSERT_EQUAL_64(0xfedcba09, x1);
+ ASSERT_EQUAL_FP64(1.234, d0);
+ ASSERT_EQUAL_FP32(2.5, s1);
+ ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
+ ASSERT_EQUAL_64(0xdcba09fe, x5);
+ ASSERT_EQUAL_FP64(123.4, d4);
+ ASSERT_EQUAL_FP32(250.0, s5);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_literal_range_1) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
+ NoJumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_2) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
+ NoJumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_3) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
+ JumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_4) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_5) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_6) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+#endif
+
+// Add/Sub with encodable immediates (12-bit, optionally shifted by 12),
+// in both W and X widths, including wrap-around at the register width
+// (0xff..ff + 1 and 0x80..00 - 1).
+TEST(add_sub_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1111);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0x8000000000000000L);
+
+ __ Add(x10, x0, Operand(0x123));
+ __ Add(x11, x1, Operand(0x122000));
+ __ Add(x12, x0, Operand(0xabc << 12));
+ __ Add(x13, x2, Operand(1));
+
+ __ Add(w14, w0, Operand(0x123));
+ __ Add(w15, w1, Operand(0x122000));
+ __ Add(w16, w0, Operand(0xabc << 12));
+ __ Add(w17, w2, Operand(1));
+
+ __ Sub(x20, x0, Operand(0x1));
+ __ Sub(x21, x1, Operand(0x111));
+ __ Sub(x22, x1, Operand(0x1 << 12));
+ __ Sub(x23, x3, Operand(1));
+
+ __ Sub(w24, w0, Operand(0x1));
+ __ Sub(w25, w1, Operand(0x111));
+ __ Sub(w26, w1, Operand(0x1 << 12));
+ __ Sub(w27, w3, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x123, x10);
+ ASSERT_EQUAL_64(0x123111, x11);
+ ASSERT_EQUAL_64(0xabc000, x12);
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_32(0x123, w14);
+ ASSERT_EQUAL_32(0x123111, w15);
+ ASSERT_EQUAL_32(0xabc000, w16);
+ ASSERT_EQUAL_32(0x0, w17);
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
+ ASSERT_EQUAL_64(0x1000, x21);
+ ASSERT_EQUAL_64(0x111, x22);
+ ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
+
+ ASSERT_EQUAL_32(0xffffffff, w24);
+ ASSERT_EQUAL_32(0x1000, w25);
+ ASSERT_EQUAL_32(0x111, w26);
+ ASSERT_EQUAL_32(0xffffffff, w27);
+
+ TEARDOWN();
+}
+
+
+// Add/Sub with immediates too wide for a single add/sub-immediate encoding;
+// the macro assembler has to synthesize them (presumably via a scratch
+// register — TODO confirm against the masm implementation).
+TEST(add_sub_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1);
+
+ __ Add(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Add(x11, x1, Operand(0xffffffff));
+
+ __ Add(w12, w0, Operand(0x12345678));
+ __ Add(w13, w1, Operand(0xffffffff));
+
+ __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+
+ __ Sub(w21, w0, Operand(0x12345678));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0x100000000UL, x11);
+
+ ASSERT_EQUAL_32(0x12345678, w12);
+ // w13 = 1 + 0xffffffff wraps to 0 at 32 bits.
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+
+ ASSERT_EQUAL_32(-0x12345678, w21);
+
+ TEARDOWN();
+}
+
+
+// Add/Sub with shifted-register operands: LSL, LSR, ASR and ROR shifts,
+// in both W and X widths.
+TEST(add_sub_shifted) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Add(x10, x1, Operand(x2));
+ __ Add(x11, x0, Operand(x1, LSL, 8));
+ __ Add(x12, x0, Operand(x1, LSR, 8));
+ __ Add(x13, x0, Operand(x1, ASR, 8));
+ __ Add(x14, x0, Operand(x2, ASR, 8));
+ __ Add(w15, w0, Operand(w1, ASR, 8));
+ __ Add(w18, w3, Operand(w1, ROR, 8));
+ __ Add(x19, x3, Operand(x1, ROR, 8));
+
+ __ Sub(x20, x3, Operand(x2));
+ __ Sub(x21, x3, Operand(x1, LSL, 8));
+ __ Sub(x22, x3, Operand(x1, LSR, 8));
+ __ Sub(x23, x3, Operand(x1, ASR, 8));
+ __ Sub(x24, x3, Operand(x2, ASR, 8));
+ __ Sub(w25, w3, Operand(w1, ASR, 8));
+ __ Sub(w26, w3, Operand(w1, ROR, 8));
+ __ Sub(x27, x3, Operand(x1, ROR, 8));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+ ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x12);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x13);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
+ // W-register results are checked through the X view (W writes
+ // zero-extend, so the upper 32 bits are zero).
+ ASSERT_EQUAL_64(0xff89abcd, x15);
+ ASSERT_EQUAL_64(0xef89abcc, x18);
+ ASSERT_EQUAL_64(0xef0123456789abccL, x19);
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
+ ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x24);
+ ASSERT_EQUAL_64(0x00765432, x25);
+ ASSERT_EQUAL_64(0x10765432, x26);
+ ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
+
+ TEARDOWN();
+}
+
+
+// Add/Sub with extended-register operands: UXTB/UXTH/UXTW and the signed
+// SXTB/SXTH/SXTW forms, each with left-shift amounts 0-4, using positive
+// (x1) and negative (x2) source patterns.
+TEST(add_sub_extended) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(w3, 0x80);
+
+ __ Add(x10, x0, Operand(x1, UXTB, 0));
+ __ Add(x11, x0, Operand(x1, UXTB, 1));
+ __ Add(x12, x0, Operand(x1, UXTH, 2));
+ __ Add(x13, x0, Operand(x1, UXTW, 4));
+
+ __ Add(x14, x0, Operand(x1, SXTB, 0));
+ __ Add(x15, x0, Operand(x1, SXTB, 1));
+ __ Add(x16, x0, Operand(x1, SXTH, 2));
+ __ Add(x17, x0, Operand(x1, SXTW, 3));
+ __ Add(x18, x0, Operand(x2, SXTB, 0));
+ __ Add(x19, x0, Operand(x2, SXTB, 1));
+ __ Add(x20, x0, Operand(x2, SXTH, 2));
+ __ Add(x21, x0, Operand(x2, SXTW, 3));
+
+ __ Add(x22, x1, Operand(x2, SXTB, 1));
+ __ Sub(x23, x1, Operand(x2, SXTB, 1));
+
+ __ Add(w24, w1, Operand(w2, UXTB, 2));
+ __ Add(w25, w0, Operand(w1, SXTB, 0));
+ __ Add(w26, w0, Operand(w1, SXTB, 1));
+ __ Add(w27, w2, Operand(w1, SXTW, 3));
+
+ __ Add(w28, w0, Operand(w1, SXTW, 3));
+ __ Add(x29, x0, Operand(w1, SXTW, 3));
+
+ __ Sub(x30, x0, Operand(w3, SXTB, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xefL, x10);
+ ASSERT_EQUAL_64(0x1deL, x11);
+ ASSERT_EQUAL_64(0x337bcL, x12);
+ ASSERT_EQUAL_64(0x89abcdef0L, x13);
+
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
+ ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+ ASSERT_EQUAL_64(0x10L, x18);
+ ASSERT_EQUAL_64(0x20L, x19);
+ ASSERT_EQUAL_64(0xc840L, x20);
+ ASSERT_EQUAL_64(0x3b2a19080L, x21);
+
+ ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
+ ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
+
+ ASSERT_EQUAL_32(0x89abce2f, w24);
+ ASSERT_EQUAL_32(0xffffffef, w25);
+ ASSERT_EQUAL_32(0xffffffde, w26);
+ ASSERT_EQUAL_32(0xc3b2a188, w27);
+
+ ASSERT_EQUAL_32(0x4d5e6f78, w28);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+
+ // 0 - (sign-extended 0x80 << 1) = 0 - (-256) = 256.
+ ASSERT_EQUAL_64(256, x30);
+
+ TEARDOWN();
+}
+
+
+// Add/Sub with negative immediates: the macro assembler can flip them to
+// the opposite operation with a positive immediate (Add x, -42 <-> Sub
+// x, 42), in both W and X widths.
+TEST(add_sub_negative) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 4687);
+ __ Mov(x2, 0x1122334455667788);
+ __ Mov(w3, 0x11223344);
+ __ Mov(w4, 400000);
+
+ __ Add(x10, x0, -42);
+ __ Add(x11, x1, -687);
+ __ Add(x12, x2, -0x88);
+
+ __ Sub(x13, x0, -600);
+ __ Sub(x14, x1, -313);
+ __ Sub(x15, x2, -0x555);
+
+ __ Add(w19, w3, -0x344);
+ __ Add(w20, w4, -2000);
+
+ __ Sub(w21, w3, -0xbc);
+ __ Sub(w22, w4, -2000);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(-42, x10);
+ ASSERT_EQUAL_64(4000, x11);
+ ASSERT_EQUAL_64(0x1122334455667700, x12);
+
+ ASSERT_EQUAL_64(600, x13);
+ ASSERT_EQUAL_64(5000, x14);
+ ASSERT_EQUAL_64(0x1122334455667cdd, x15);
+
+ ASSERT_EQUAL_32(0x11223000, w19);
+ ASSERT_EQUAL_32(398000, w20);
+
+ ASSERT_EQUAL_32(0x11223400, w21);
+ ASSERT_EQUAL_32(402000, w22);
+
+ TEARDOWN();
+}
+
+
+// Check which zero adds/subs the macro assembler elides: X-width no-ops
+// generate no code at all, while the W-width forms must still emit an
+// instruction (presumably because a W write zero-extends into the full X
+// register — confirm against the masm implementation).
+TEST(add_sub_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+
+ Label blob1;
+ __ Bind(&blob1);
+ __ Add(x0, x0, 0);
+ __ Sub(x1, x1, 0);
+ __ Sub(x2, x2, xzr);
+ // X-width no-ops: nothing may be emitted.
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
+
+ Label blob2;
+ __ Bind(&blob2);
+ __ Add(w3, w3, 0);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
+
+ Label blob3;
+ __ Bind(&blob3);
+ __ Sub(w3, w3, wzr);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+
+ TEARDOWN();
+}
+
+
+// Zero-sized stack Claim/Drop pseudo-instructions (immediate, register,
+// and SMI variants) must all generate no code.
+TEST(claim_drop_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label start;
+ __ Bind(&start);
+ __ Claim(0);
+ __ Drop(0);
+ __ Claim(xzr, 8);
+ __ Drop(xzr, 8);
+ __ Claim(xzr, 0);
+ __ Drop(xzr, 0);
+ __ Claim(x7, 0);
+ __ Drop(x7, 0);
+ __ ClaimBySMI(xzr, 8);
+ __ DropBySMI(xzr, 8);
+ __ ClaimBySMI(xzr, 0);
+ __ DropBySMI(xzr, 0);
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+// Neg pseudo-instruction with immediate, shifted-register and
+// extended-register operands, in both W and X widths.
+TEST(neg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf123456789abcdefL);
+
+ // Immediate.
+ __ Neg(x1, 0x123);
+ __ Neg(w2, 0x123);
+
+ // Shifted.
+ __ Neg(x3, Operand(x0, LSL, 1));
+ __ Neg(w4, Operand(w0, LSL, 2));
+ __ Neg(x5, Operand(x0, LSR, 3));
+ __ Neg(w6, Operand(w0, LSR, 4));
+ __ Neg(x7, Operand(x0, ASR, 5));
+ __ Neg(w8, Operand(w0, ASR, 6));
+
+ // Extended.
+ __ Neg(w9, Operand(w0, UXTB));
+ __ Neg(x10, Operand(x0, SXTB, 1));
+ __ Neg(w11, Operand(w0, UXTH, 2));
+ __ Neg(x12, Operand(x0, SXTH, 3));
+ __ Neg(w13, Operand(w0, UXTW, 4));
+ __ Neg(x14, Operand(x0, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
+ ASSERT_EQUAL_64(0xfffffedd, x2);
+ ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
+ ASSERT_EQUAL_64(0xd950c844, x4);
+ ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
+ ASSERT_EQUAL_64(0xf7654322, x6);
+ ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
+ ASSERT_EQUAL_64(0x01d950c9, x8);
+ ASSERT_EQUAL_64(0xffffff11, x9);
+ ASSERT_EQUAL_64(0x0000000000000022UL, x10);
+ ASSERT_EQUAL_64(0xfffcc844, x11);
+ ASSERT_EQUAL_64(0x0000000000019088UL, x12);
+ ASSERT_EQUAL_64(0x65432110, x13);
+ ASSERT_EQUAL_64(0x0000000765432110UL, x14);
+
+ TEARDOWN();
+}
+
+
+// Test Adc/Sbc (add/subtract with carry) with shifted-register operands,
+// with the C flag both clear and set, and then verify that the flag-setting
+// variants (Adcs/Sbcs/Ngcs) produce the expected NZCV results.
+// Fix: three START() invocations were missing their terminating semicolon,
+// inconsistent with every other START(); call in this file.
+TEST(adc_sbc_shift) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 1);
+  __ Mov(x2, 0x0123456789abcdefL);
+  __ Mov(x3, 0xfedcba9876543210L);
+  __ Mov(x4, 0xffffffffffffffffL);
+
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+
+  __ Adc(x5, x2, Operand(x3));
+  __ Adc(x6, x0, Operand(x1, LSL, 60));
+  __ Sbc(x7, x4, Operand(x3, LSR, 4));
+  __ Adc(x8, x2, Operand(x3, ASR, 4));
+  __ Adc(x9, x2, Operand(x3, ROR, 8));
+
+  __ Adc(w10, w2, Operand(w3));
+  __ Adc(w11, w0, Operand(w1, LSL, 30));
+  __ Sbc(w12, w4, Operand(w3, LSR, 4));
+  __ Adc(w13, w2, Operand(w3, ASR, 4));
+  __ Adc(w14, w2, Operand(w3, ROR, 8));
+
+  // Set the C flag.
+  __ Cmp(w0, Operand(w0));
+
+  // Repeat the same operations; each result should now be one higher.
+  __ Adc(x18, x2, Operand(x3));
+  __ Adc(x19, x0, Operand(x1, LSL, 60));
+  __ Sbc(x20, x4, Operand(x3, LSR, 4));
+  __ Adc(x21, x2, Operand(x3, ASR, 4));
+  __ Adc(x22, x2, Operand(x3, ROR, 8));
+
+  __ Adc(w23, w2, Operand(w3));
+  __ Adc(w24, w0, Operand(w1, LSL, 30));
+  __ Sbc(w25, w4, Operand(w3, LSR, 4));
+  __ Adc(w26, w2, Operand(w3, ASR, 4));
+  __ Adc(w27, w2, Operand(w3, ROR, 8));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
+  ASSERT_EQUAL_64(1L << 60, x6);
+  ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
+  ASSERT_EQUAL_64(0x0111111111111110L, x8);
+  ASSERT_EQUAL_64(0x1222222222222221L, x9);
+
+  ASSERT_EQUAL_32(0xffffffff, w10);
+  ASSERT_EQUAL_32(1 << 30, w11);
+  ASSERT_EQUAL_32(0xf89abcdd, w12);
+  ASSERT_EQUAL_32(0x91111110, w13);
+  ASSERT_EQUAL_32(0x9a222221, w14);
+
+  // With C set, each result is the C-clear result plus one.
+  ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
+  ASSERT_EQUAL_64((1L << 60) + 1, x19);
+  ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
+  ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
+  ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
+
+  ASSERT_EQUAL_32(0xffffffff + 1, w23);
+  ASSERT_EQUAL_32((1 << 30) + 1, w24);
+  ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
+  ASSERT_EQUAL_32(0x91111110 + 1, w26);
+  ASSERT_EQUAL_32(0x9a222221 + 1, w27);
+
+  // Check that adc correctly sets the condition flags.
+  START();
+  __ Mov(x0, 1);
+  __ Mov(x1, 0xffffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(x1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+  ASSERT_EQUAL_64(0, x10);
+
+  START();
+  __ Mov(x0, 1);
+  __ Mov(x1, 0x8000000000000000L);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(x1, ASR, 63));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+  ASSERT_EQUAL_64(0, x10);
+
+  START();
+  __ Mov(x0, 0x10);
+  __ Mov(x1, 0x07ffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(x1, LSL, 4));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NVFlag);
+  ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+  // Check that sbc correctly sets the condition flags.
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0xffffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Sbcs(x10, x0, Operand(x1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZFlag);
+  ASSERT_EQUAL_64(0, x10);
+
+  START();
+  __ Mov(x0, 1);
+  __ Mov(x1, 0xffffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Sbcs(x10, x0, Operand(x1, LSR, 1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+  START();
+  __ Mov(x0, 0);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZFlag);
+  ASSERT_EQUAL_64(0, x10);
+
+  START();
+  __ Mov(w0, 0x7fffffff);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Ngcs(w10, w0);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x80000000, x10);
+
+  START();
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Ngcs(x10, 0x7fffffffffffffffL);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+  START();
+  __ Mov(x0, 0);
+  // Set the C flag.
+  __ Cmp(x0, Operand(x0));
+  __ Sbcs(x10, x0, Operand(1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+
+  START();
+  __ Mov(x0, 0);
+  // Set the C flag.
+  __ Cmp(x0, Operand(x0));
+  __ Ngcs(x10, 0x7fffffffffffffffL);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+  ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+  TEARDOWN();
+}
+
+
+// Test Adc/Sbc with extended-register operands (UXTB/SXTH/UXTW/UXTX etc.),
+// with the C flag both clear and set, then check that Adcs sets NZCV
+// correctly for carry-out and signed-overflow cases.
+TEST(adc_sbc_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+
+  __ Mov(x0, 0);
+  __ Mov(x1, 1);
+  __ Mov(x2, 0x0123456789abcdefL);
+
+  __ Adc(x10, x1, Operand(w2, UXTB, 1));
+  __ Adc(x11, x1, Operand(x2, SXTH, 2));
+  __ Sbc(x12, x1, Operand(w2, UXTW, 4));
+  __ Adc(x13, x1, Operand(x2, UXTX, 4));
+
+  __ Adc(w14, w1, Operand(w2, UXTB, 1));
+  __ Adc(w15, w1, Operand(w2, SXTH, 2));
+  __ Adc(w9, w1, Operand(w2, UXTW, 4));
+
+  // Set the C flag.
+  __ Cmp(w0, Operand(w0));
+
+  // Repeat with C set; each result should be one higher.
+  __ Adc(x20, x1, Operand(w2, UXTB, 1));
+  __ Adc(x21, x1, Operand(x2, SXTH, 2));
+  __ Sbc(x22, x1, Operand(w2, UXTW, 4));
+  __ Adc(x23, x1, Operand(x2, UXTX, 4));
+
+  __ Adc(w24, w1, Operand(w2, UXTB, 1));
+  __ Adc(w25, w1, Operand(w2, SXTH, 2));
+  __ Adc(w26, w1, Operand(w2, UXTW, 4));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1df, x10);
+  ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
+  ASSERT_EQUAL_64(0xfffffff765432110L, x12);
+  ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
+
+  ASSERT_EQUAL_32(0x1df, w14);
+  ASSERT_EQUAL_32(0xffff37bd, w15);
+  ASSERT_EQUAL_32(0x9abcdef1, w9);
+
+  ASSERT_EQUAL_64(0x1df + 1, x20);
+  ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
+  ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
+  ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
+
+  ASSERT_EQUAL_32(0x1df + 1, w24);
+  ASSERT_EQUAL_32(0xffff37bd + 1, w25);
+  ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
+
+  // Check that adc correctly sets the condition flags.
+  START();
+  __ Mov(x0, 0xff);
+  __ Mov(x1, 0xffffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(x1, SXTX, 1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(CFlag);
+
+  START();
+  __ Mov(x0, 0x7fffffffffffffffL);
+  __ Mov(x1, 1);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(x1, UXTB, 2));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NVFlag);
+
+  START();
+  __ Mov(x0, 0x7fffffffffffffffL);
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Adcs(x10, x0, Operand(1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NVFlag);
+
+  TEARDOWN();
+}
+
+
+// Test Adc/Sbc/Ngc with wide immediate operands (immediates too large to be
+// encoded directly), with the C flag both clear and set.
+TEST(adc_sbc_wide_imm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+
+  __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
+  __ Adc(w8, w0, Operand(0xffffffff));
+  __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
+  __ Sbc(w10, w0, Operand(0xffffffff));
+  __ Ngc(x11, Operand(0xffffffff00000000UL));
+  __ Ngc(w12, Operand(0xffff0000));
+
+  // Set the C flag.
+  __ Cmp(w0, Operand(w0));
+
+  // Repeat with C set; each result should be one higher.
+  __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
+  __ Adc(w19, w0, Operand(0xffffffff));
+  __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
+  __ Sbc(w21, w0, Operand(0xffffffff));
+  __ Ngc(x22, Operand(0xffffffff00000000UL));
+  __ Ngc(w23, Operand(0xffff0000));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
+  ASSERT_EQUAL_64(0xffffffff, x8);
+  ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
+  ASSERT_EQUAL_64(0, x10);
+  ASSERT_EQUAL_64(0xffffffff, x11);
+  ASSERT_EQUAL_64(0xffff, x12);
+
+  ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+  ASSERT_EQUAL_64(0, x19);
+  ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
+  ASSERT_EQUAL_64(1, x21);
+  ASSERT_EQUAL_64(0x100000000UL, x22);
+  ASSERT_EQUAL_64(0x10000, x23);
+
+  TEARDOWN();
+}
+
+
+// Test Neg/Ngc results and the NZCV flags produced by Cmp, Cmn and Ngcs for
+// zero, negative, carry and signed-overflow cases, in both W and X forms.
+TEST(flags) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0x1111111111111111L);
+  __ Neg(x10, Operand(x0));
+  __ Neg(x11, Operand(x1));
+  __ Neg(w12, Operand(w1));
+  // Clear the C flag.
+  __ Adds(x0, x0, Operand(0));
+  __ Ngc(x13, Operand(x0));
+  // Set the C flag.
+  __ Cmp(x0, Operand(x0));
+  __ Ngc(w14, Operand(w0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x10);
+  ASSERT_EQUAL_64(-0x1111111111111111L, x11);
+  ASSERT_EQUAL_32(-0x11111111, w12);
+  // Ngc(0) with C clear is ~0 + 0 = -1; with C set it is ~0 + 1 = 0.
+  ASSERT_EQUAL_64(-1L, x13);
+  ASSERT_EQUAL_32(0, w14);
+
+  START();
+  __ Mov(x0, 0);
+  __ Cmp(x0, Operand(x0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+
+  START();
+  __ Mov(w0, 0);
+  __ Cmp(w0, Operand(w0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0x1111111111111111L);
+  __ Cmp(x0, Operand(x1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+
+  START();
+  __ Mov(w0, 0);
+  __ Mov(w1, 0x11111111);
+  __ Cmp(w0, Operand(w1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+
+  START();
+  __ Mov(x1, 0x1111111111111111L);
+  __ Cmp(x1, Operand(0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(CFlag);
+
+  START();
+  __ Mov(w1, 0x11111111);
+  __ Cmp(w1, Operand(0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(CFlag);
+
+  // Cmn at the positive limit: signed overflow sets N and V.
+  START();
+  __ Mov(x0, 1);
+  __ Mov(x1, 0x7fffffffffffffffL);
+  __ Cmn(x1, Operand(x0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NVFlag);
+
+  START();
+  __ Mov(w0, 1);
+  __ Mov(w1, 0x7fffffff);
+  __ Cmn(w1, Operand(w0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NVFlag);
+
+  // Cmn wrapping to zero: sets Z and the carry out.
+  START();
+  __ Mov(x0, 1);
+  __ Mov(x1, 0xffffffffffffffffL);
+  __ Cmn(x1, Operand(x0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+
+  START();
+  __ Mov(w0, 1);
+  __ Mov(w1, 0xffffffff);
+  __ Cmn(w1, Operand(w0));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+
+  START();
+  __ Mov(w0, 0);
+  __ Mov(w1, 1);
+  // Clear the C flag.
+  __ Adds(w0, w0, Operand(0));
+  __ Ngcs(w0, Operand(w1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(NFlag);
+
+  START();
+  __ Mov(w0, 0);
+  __ Mov(w1, 0);
+  // Set the C flag.
+  __ Cmp(w0, Operand(w0));
+  __ Ngcs(w0, Operand(w1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_NZCV(ZCFlag);
+
+  TEARDOWN();
+}
+
+
+// Test Cmp with shifted-register operands. Each input pair is constructed so
+// the shifted operand equals the compared register, so every comparison must
+// yield ZC (equal, no borrow); the flags are read back via Mrs(NZCV).
+TEST(cmp_shift) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x18, 0xf0000000);
+  __ Mov(x19, 0xf000000010000000UL);
+  __ Mov(x20, 0xf0000000f0000000UL);
+  __ Mov(x21, 0x7800000078000000UL);
+  __ Mov(x22, 0x3c0000003c000000UL);
+  __ Mov(x23, 0x8000000780000000UL);
+  __ Mov(x24, 0x0000000f00000000UL);
+  __ Mov(x25, 0x00000003c0000000UL);
+  __ Mov(x26, 0x8000000780000000UL);
+  __ Mov(x27, 0xc0000003);
+
+  __ Cmp(w20, Operand(w21, LSL, 1));
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(x20, Operand(x22, LSL, 2));
+  __ Mrs(x1, NZCV);
+
+  __ Cmp(w19, Operand(w23, LSR, 3));
+  __ Mrs(x2, NZCV);
+
+  __ Cmp(x18, Operand(x24, LSR, 4));
+  __ Mrs(x3, NZCV);
+
+  __ Cmp(w20, Operand(w25, ASR, 2));
+  __ Mrs(x4, NZCV);
+
+  __ Cmp(x20, Operand(x26, ASR, 3));
+  __ Mrs(x5, NZCV);
+
+  __ Cmp(w27, Operand(w22, ROR, 28));
+  __ Mrs(x6, NZCV);
+
+  __ Cmp(x20, Operand(x21, ROR, 31));
+  __ Mrs(x7, NZCV);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(ZCFlag, w0);
+  ASSERT_EQUAL_32(ZCFlag, w1);
+  ASSERT_EQUAL_32(ZCFlag, w2);
+  ASSERT_EQUAL_32(ZCFlag, w3);
+  ASSERT_EQUAL_32(ZCFlag, w4);
+  ASSERT_EQUAL_32(ZCFlag, w5);
+  ASSERT_EQUAL_32(ZCFlag, w6);
+  ASSERT_EQUAL_32(ZCFlag, w7);
+
+  TEARDOWN();
+}
+
+
+// Test Cmp with extended-register operands (SXTB/UXTB/UXTH/SXTH/UXTW/SXTW),
+// reading NZCV back after each comparison. Equal operands give ZC; the
+// unsigned-extension cases where the operands differ give NC.
+TEST(cmp_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(w20, 0x2);
+  __ Mov(w21, 0x1);
+  __ Mov(x22, 0xffffffffffffffffUL);
+  __ Mov(x23, 0xff);
+  __ Mov(x24, 0xfffffffffffffffeUL);
+  __ Mov(x25, 0xffff);
+  __ Mov(x26, 0xffffffff);
+
+  __ Cmp(w20, Operand(w21, LSL, 1));
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(x22, Operand(x23, SXTB, 0));
+  __ Mrs(x1, NZCV);
+
+  __ Cmp(x24, Operand(x23, SXTB, 1));
+  __ Mrs(x2, NZCV);
+
+  __ Cmp(x24, Operand(x23, UXTB, 1));
+  __ Mrs(x3, NZCV);
+
+  __ Cmp(w22, Operand(w25, UXTH));
+  __ Mrs(x4, NZCV);
+
+  __ Cmp(x22, Operand(x25, SXTH));
+  __ Mrs(x5, NZCV);
+
+  __ Cmp(x22, Operand(x26, UXTW));
+  __ Mrs(x6, NZCV);
+
+  __ Cmp(x24, Operand(x26, SXTW, 1));
+  __ Mrs(x7, NZCV);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(ZCFlag, w0);
+  ASSERT_EQUAL_32(ZCFlag, w1);
+  ASSERT_EQUAL_32(ZCFlag, w2);
+  ASSERT_EQUAL_32(NCFlag, w3);
+  ASSERT_EQUAL_32(NCFlag, w4);
+  ASSERT_EQUAL_32(ZCFlag, w5);
+  ASSERT_EQUAL_32(NCFlag, w6);
+  ASSERT_EQUAL_32(ZCFlag, w7);
+
+  TEARDOWN();
+}
+
+
+// Test Ccmp/Ccmn (conditional compare): when the condition holds the real
+// comparison flags are produced, otherwise the immediate NZCV value is set.
+TEST(ccmp) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(w16, 0);
+  __ Mov(w17, 1);
+  __ Cmp(w16, w16);
+  __ Ccmp(w16, w17, NCFlag, eq);
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(w16, w16);
+  __ Ccmp(w16, w17, NCFlag, ne);
+  __ Mrs(x1, NZCV);
+
+  __ Cmp(x16, x16);
+  __ Ccmn(x16, 2, NZCVFlag, eq);
+  __ Mrs(x2, NZCV);
+
+  __ Cmp(x16, x16);
+  __ Ccmn(x16, 2, NZCVFlag, ne);
+  __ Mrs(x3, NZCV);
+
+  // Raw assembler form used here — presumably because the macro assembler
+  // does not accept the al/nv conditions; TODO confirm against macro-asm.
+  __ ccmp(x16, x16, NZCVFlag, al);
+  __ Mrs(x4, NZCV);
+
+  __ ccmp(x16, x16, NZCVFlag, nv);
+  __ Mrs(x5, NZCV);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(NFlag, w0);
+  ASSERT_EQUAL_32(NCFlag, w1);
+  ASSERT_EQUAL_32(NoFlag, w2);
+  ASSERT_EQUAL_32(NZCVFlag, w3);
+  ASSERT_EQUAL_32(ZCFlag, w4);
+  ASSERT_EQUAL_32(ZCFlag, w5);
+
+  TEARDOWN();
+}
+
+
+// Test Ccmp with immediates too wide for the instruction encoding, forcing
+// the macro assembler to materialise them before comparing.
+TEST(ccmp_wide_imm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(w20, 0);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
+  __ Mrs(x1, NZCV);
+  END();
+
+  RUN();
+
+  // 0 - 0x12345678 is negative (N); 0 - (-1) is 1 (no flags set).
+  ASSERT_EQUAL_32(NFlag, w0);
+  ASSERT_EQUAL_32(NoFlag, w1);
+
+  TEARDOWN();
+}
+
+
+// Test Ccmp with shifted- and extended-register operands, covering both the
+// condition-true path (real comparison) and the condition-false path
+// (immediate NZCV value written directly).
+TEST(ccmp_shift_extend) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(w20, 0x2);
+  __ Mov(w21, 0x1);
+  __ Mov(x22, 0xffffffffffffffffUL);
+  __ Mov(x23, 0xff);
+  __ Mov(x24, 0xfffffffffffffffeUL);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
+  __ Mrs(x1, NZCV);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
+  __ Mrs(x2, NZCV);
+
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
+  __ Mrs(x3, NZCV);
+
+  // Condition (ne) fails, so the immediate NZCVFlag value is set instead.
+  __ Cmp(w20, Operand(w20));
+  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
+  __ Mrs(x4, NZCV);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(ZCFlag, w0);
+  ASSERT_EQUAL_32(ZCFlag, w1);
+  ASSERT_EQUAL_32(ZCFlag, w2);
+  ASSERT_EQUAL_32(NCFlag, w3);
+  ASSERT_EQUAL_32(NZCVFlag, w4);
+
+  TEARDOWN();
+}
+
+
+// Test the conditional select family: Csel, Csinc, Csinv, Csneg and their
+// aliases (Cset, Csetm, Cinc, Cinv, Cneg, CzeroX, CmovX), plus the raw
+// assembler csel for al/nv conditions.
+TEST(csel) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 0);
+  __ Mov(x24, 0x0000000f0000000fUL);
+  __ Mov(x25, 0x0000001f0000001fUL);
+  __ Mov(x26, 0);
+  __ Mov(x27, 0);
+
+  __ Cmp(w16, 0);
+  __ Csel(w0, w24, w25, eq);
+  __ Csel(w1, w24, w25, ne);
+  __ Csinc(w2, w24, w25, mi);
+  __ Csinc(w3, w24, w25, pl);
+
+  // Raw assembler form — presumably because the macro assembler does not
+  // accept the al/nv conditions; TODO confirm.
+  __ csel(w13, w24, w25, al);
+  __ csel(x14, x24, x25, nv);
+
+  __ Cmp(x16, 1);
+  __ Csinv(x4, x24, x25, gt);
+  __ Csinv(x5, x24, x25, le);
+  __ Csneg(x6, x24, x25, hs);
+  __ Csneg(x7, x24, x25, lo);
+
+  __ Cset(w8, ne);
+  __ Csetm(w9, ne);
+  __ Cinc(x10, x25, ne);
+  __ Cinv(x11, x24, ne);
+  __ Cneg(x12, x24, ne);
+
+  __ csel(w15, w24, w25, al);
+  __ csel(x18, x24, x25, nv);
+
+  __ CzeroX(x24, ne);
+  __ CzeroX(x25, eq);
+
+  __ CmovX(x26, x25, ne);
+  __ CmovX(x27, x25, eq);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x0000000f, x0);
+  ASSERT_EQUAL_64(0x0000001f, x1);
+  ASSERT_EQUAL_64(0x00000020, x2);
+  ASSERT_EQUAL_64(0x0000000f, x3);
+  ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
+  ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
+  ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
+  ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
+  ASSERT_EQUAL_64(0x00000001, x8);
+  ASSERT_EQUAL_64(0xffffffff, x9);
+  ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
+  ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
+  ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
+  ASSERT_EQUAL_64(0x0000000f, x13);
+  ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
+  ASSERT_EQUAL_64(0x0000000f, x15);
+  ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
+  ASSERT_EQUAL_64(0, x24);
+  ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
+  ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
+  ASSERT_EQUAL_64(0, x27);
+
+  TEARDOWN();
+}
+
+
+// Test Csel with immediate and non-register "false" operands: the small
+// immediates -1, 0 and 1 can use the csinv/csel-zr/csinc encodings, while
+// the others force the operand to be materialised.
+TEST(csel_imm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x18, 0);
+  __ Mov(x19, 0x80000000);
+  __ Mov(x20, 0x8000000000000000UL);
+
+  // x18 == 0, so 'ne' selects the immediate operand and 'eq' the register.
+  __ Cmp(x18, Operand(0));
+  __ Csel(w0, w19, -2, ne);
+  __ Csel(w1, w19, -1, ne);
+  __ Csel(w2, w19, 0, ne);
+  __ Csel(w3, w19, 1, ne);
+  __ Csel(w4, w19, 2, ne);
+  __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
+  __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
+  __ Csel(w7, w19, 3, eq);
+
+  __ Csel(x8, x20, -2, ne);
+  __ Csel(x9, x20, -1, ne);
+  __ Csel(x10, x20, 0, ne);
+  __ Csel(x11, x20, 1, ne);
+  __ Csel(x12, x20, 2, ne);
+  __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
+  __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
+  __ Csel(x15, x20, 3, eq);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(-2, w0);
+  ASSERT_EQUAL_32(-1, w1);
+  ASSERT_EQUAL_32(0, w2);
+  ASSERT_EQUAL_32(1, w3);
+  ASSERT_EQUAL_32(2, w4);
+  ASSERT_EQUAL_32(-1, w5);
+  ASSERT_EQUAL_32(0x40000000, w6);
+  ASSERT_EQUAL_32(0x80000000, w7);
+
+  ASSERT_EQUAL_64(-2, x8);
+  ASSERT_EQUAL_64(-1, x9);
+  ASSERT_EQUAL_64(0, x10);
+  ASSERT_EQUAL_64(1, x11);
+  ASSERT_EQUAL_64(2, x12);
+  ASSERT_EQUAL_64(-1, x13);
+  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x15);
+
+  TEARDOWN();
+}
+
+
+// Test variable logical-shift-left (lslv / the Lsl macro with a register
+// shift amount). Shift amounts are taken modulo the register width (64 for
+// X registers, 32 for W registers), which the expected-value masks mirror.
+TEST(lslv) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t value = 0x0123456789abcdefUL;
+  int shift[] = {1, 3, 5, 9, 17, 33};
+
+  START();
+  __ Mov(x0, value);
+  __ Mov(w1, shift[0]);
+  __ Mov(w2, shift[1]);
+  __ Mov(w3, shift[2]);
+  __ Mov(w4, shift[3]);
+  __ Mov(w5, shift[4]);
+  __ Mov(w6, shift[5]);
+
+  // Shift by zero (xzr): x0 must be unchanged.
+  __ lslv(x0, x0, xzr);
+
+  __ Lsl(x16, x0, x1);
+  __ Lsl(x17, x0, x2);
+  __ Lsl(x18, x0, x3);
+  __ Lsl(x19, x0, x4);
+  __ Lsl(x20, x0, x5);
+  __ Lsl(x21, x0, x6);
+
+  __ Lsl(w22, w0, w1);
+  __ Lsl(w23, w0, w2);
+  __ Lsl(w24, w0, w3);
+  __ Lsl(w25, w0, w4);
+  __ Lsl(w26, w0, w5);
+  __ Lsl(w27, w0, w6);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(value, x0);
+  ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
+  ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
+  ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
+  ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
+  ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
+  ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
+  ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
+  ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
+  ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
+  ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
+  ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
+  ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
+
+  TEARDOWN();
+}
+
+
+// Test variable logical-shift-right (lsrv / the Lsr macro with a register
+// shift amount), in both X and W forms. W-form expectations shift the
+// low 32 bits of the value only.
+TEST(lsrv) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t value = 0x0123456789abcdefUL;
+  int shift[] = {1, 3, 5, 9, 17, 33};
+
+  START();
+  __ Mov(x0, value);
+  __ Mov(w1, shift[0]);
+  __ Mov(w2, shift[1]);
+  __ Mov(w3, shift[2]);
+  __ Mov(w4, shift[3]);
+  __ Mov(w5, shift[4]);
+  __ Mov(w6, shift[5]);
+
+  // Shift by zero (xzr): x0 must be unchanged.
+  __ lsrv(x0, x0, xzr);
+
+  __ Lsr(x16, x0, x1);
+  __ Lsr(x17, x0, x2);
+  __ Lsr(x18, x0, x3);
+  __ Lsr(x19, x0, x4);
+  __ Lsr(x20, x0, x5);
+  __ Lsr(x21, x0, x6);
+
+  __ Lsr(w22, w0, w1);
+  __ Lsr(w23, w0, w2);
+  __ Lsr(w24, w0, w3);
+  __ Lsr(w25, w0, w4);
+  __ Lsr(w26, w0, w5);
+  __ Lsr(w27, w0, w6);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(value, x0);
+  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+  // The W form operates on (and shifts in zeroes above) the low 32 bits.
+  value &= 0xffffffffUL;
+  ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
+  ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
+  ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
+  ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
+  ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
+  ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
+
+  TEARDOWN();
+}
+
+
+// Test variable arithmetic-shift-right (asrv / the Asr macro with a register
+// shift amount). The value is negative so sign bits are shifted in; the
+// expectations use signed C++ right shifts to model that.
+TEST(asrv) {
+  INIT_V8();
+  SETUP();
+
+  int64_t value = 0xfedcba98fedcba98UL;
+  int shift[] = {1, 3, 5, 9, 17, 33};
+
+  START();
+  __ Mov(x0, value);
+  __ Mov(w1, shift[0]);
+  __ Mov(w2, shift[1]);
+  __ Mov(w3, shift[2]);
+  __ Mov(w4, shift[3]);
+  __ Mov(w5, shift[4]);
+  __ Mov(w6, shift[5]);
+
+  // Shift by zero (xzr): x0 must be unchanged.
+  __ asrv(x0, x0, xzr);
+
+  __ Asr(x16, x0, x1);
+  __ Asr(x17, x0, x2);
+  __ Asr(x18, x0, x3);
+  __ Asr(x19, x0, x4);
+  __ Asr(x20, x0, x5);
+  __ Asr(x21, x0, x6);
+
+  __ Asr(w22, w0, w1);
+  __ Asr(w23, w0, w2);
+  __ Asr(w24, w0, w3);
+  __ Asr(w25, w0, w4);
+  __ Asr(w26, w0, w5);
+  __ Asr(w27, w0, w6);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(value, x0);
+  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+  // The W form sign-extends from bit 31 of the low word.
+  int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
+  ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
+  ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
+  ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
+  ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
+  ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
+  ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
+
+  TEARDOWN();
+}
+
+
+// Test variable rotate-right (rorv / the Ror macro with a register rotate
+// amount) in X and W forms, against precomputed rotated constants.
+TEST(rorv) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t value = 0x0123456789abcdefUL;
+  int shift[] = {4, 8, 12, 16, 24, 36};
+
+  START();
+  __ Mov(x0, value);
+  __ Mov(w1, shift[0]);
+  __ Mov(w2, shift[1]);
+  __ Mov(w3, shift[2]);
+  __ Mov(w4, shift[3]);
+  __ Mov(w5, shift[4]);
+  __ Mov(w6, shift[5]);
+
+  // Rotate by zero (xzr): x0 must be unchanged.
+  __ rorv(x0, x0, xzr);
+
+  __ Ror(x16, x0, x1);
+  __ Ror(x17, x0, x2);
+  __ Ror(x18, x0, x3);
+  __ Ror(x19, x0, x4);
+  __ Ror(x20, x0, x5);
+  __ Ror(x21, x0, x6);
+
+  __ Ror(w22, w0, w1);
+  __ Ror(w23, w0, w2);
+  __ Ror(w24, w0, w3);
+  __ Ror(w25, w0, w4);
+  __ Ror(w26, w0, w5);
+  __ Ror(w27, w0, w6);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(value, x0);
+  ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
+  ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
+  ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
+  ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
+  ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
+  ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
+  ASSERT_EQUAL_32(0xf89abcde, w22);
+  ASSERT_EQUAL_32(0xef89abcd, w23);
+  ASSERT_EQUAL_32(0xdef89abc, w24);
+  ASSERT_EQUAL_32(0xcdef89ab, w25);
+  ASSERT_EQUAL_32(0xabcdef89, w26);
+  ASSERT_EQUAL_32(0xf89abcde, w27);
+
+  TEARDOWN();
+}
+
+
+// Test the bitfield-move instruction (bfm) and its Bfi/Bfxil aliases.
+// Destinations are pre-filled with 0x88... so it is visible which bits
+// each operation inserts and which it leaves untouched.
+TEST(bfm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x1, 0x0123456789abcdefL);
+
+  __ Mov(x10, 0x8888888888888888L);
+  __ Mov(x11, 0x8888888888888888L);
+  __ Mov(x12, 0x8888888888888888L);
+  __ Mov(x13, 0x8888888888888888L);
+  __ Mov(w20, 0x88888888);
+  __ Mov(w21, 0x88888888);
+
+  __ bfm(x10, x1, 16, 31);
+  __ bfm(x11, x1, 32, 15);
+
+  __ bfm(w20, w1, 16, 23);
+  __ bfm(w21, w1, 24, 15);
+
+  // Aliases.
+  __ Bfi(x12, x1, 16, 8);
+  __ Bfxil(x13, x1, 16, 8);
+  END();
+
+  RUN();
+
+
+  ASSERT_EQUAL_64(0x88888888888889abL, x10);
+  ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
+
+  ASSERT_EQUAL_32(0x888888ab, w20);
+  ASSERT_EQUAL_32(0x88cdef88, w21);
+
+  ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
+  ASSERT_EQUAL_64(0x88888888888888abL, x13);
+
+  TEARDOWN();
+}
+
+
+// Test the signed bitfield-move instruction (sbfm) and its aliases:
+// Asr (immediate), Sbfiz, Sbfx, Sxtb, Sxth and Sxtw.
+TEST(sbfm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x1, 0x0123456789abcdefL);
+  __ Mov(x2, 0xfedcba9876543210L);
+
+  __ sbfm(x10, x1, 16, 31);
+  __ sbfm(x11, x1, 32, 15);
+  __ sbfm(x12, x1, 32, 47);
+  __ sbfm(x13, x1, 48, 35);
+
+  __ sbfm(w14, w1, 16, 23);
+  __ sbfm(w15, w1, 24, 15);
+  __ sbfm(w16, w2, 16, 23);
+  __ sbfm(w17, w2, 24, 15);
+
+  // Aliases.
+  __ Asr(x18, x1, 32);
+  __ Asr(x19, x2, 32);
+  __ Sbfiz(x20, x1, 8, 16);
+  __ Sbfiz(x21, x2, 8, 16);
+  __ Sbfx(x22, x1, 8, 16);
+  __ Sbfx(x23, x2, 8, 16);
+  __ Sxtb(x24, w1);
+  __ Sxtb(x25, x2);
+  __ Sxth(x26, w1);
+  __ Sxth(x27, x2);
+  __ Sxtw(x28, w1);
+  __ Sxtw(x29, x2);
+  END();
+
+  RUN();
+
+
+  ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
+  ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
+  ASSERT_EQUAL_64(0x4567L, x12);
+  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+  ASSERT_EQUAL_32(0xffffffab, w14);
+  ASSERT_EQUAL_32(0xffcdef00, w15);
+  ASSERT_EQUAL_32(0x54, w16);
+  ASSERT_EQUAL_32(0x00321000, w17);
+
+  ASSERT_EQUAL_64(0x01234567L, x18);
+  ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
+  ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
+  ASSERT_EQUAL_64(0x321000L, x21);
+  ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
+  ASSERT_EQUAL_64(0x5432L, x23);
+  ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
+  ASSERT_EQUAL_64(0x10, x25);
+  ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
+  ASSERT_EQUAL_64(0x3210, x27);
+  ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
+  ASSERT_EQUAL_64(0x76543210, x29);
+
+  TEARDOWN();
+}
+
+
+// Test the unsigned bitfield-move instruction (ubfm) and its aliases:
+// Lsl/Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw. x10/x11 are
+// pre-filled with 0x88... to show that ubfm fully overwrites them.
+TEST(ubfm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x1, 0x0123456789abcdefL);
+  __ Mov(x2, 0xfedcba9876543210L);
+
+  __ Mov(x10, 0x8888888888888888L);
+  __ Mov(x11, 0x8888888888888888L);
+
+  __ ubfm(x10, x1, 16, 31);
+  __ ubfm(x11, x1, 32, 15);
+  __ ubfm(x12, x1, 32, 47);
+  __ ubfm(x13, x1, 48, 35);
+
+  __ ubfm(w25, w1, 16, 23);
+  __ ubfm(w26, w1, 24, 15);
+  __ ubfm(w27, w2, 16, 23);
+  __ ubfm(w28, w2, 24, 15);
+
+  // Aliases
+  __ Lsl(x15, x1, 63);
+  __ Lsl(x16, x1, 0);
+  __ Lsr(x17, x1, 32);
+  __ Ubfiz(x18, x1, 8, 16);
+  __ Ubfx(x19, x1, 8, 16);
+  __ Uxtb(x20, x1);
+  __ Uxth(x21, x1);
+  __ Uxtw(x22, x1);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x00000000000089abL, x10);
+  ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
+  ASSERT_EQUAL_64(0x4567L, x12);
+  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+  ASSERT_EQUAL_32(0x000000ab, w25);
+  ASSERT_EQUAL_32(0x00cdef00, w26);
+  ASSERT_EQUAL_32(0x54, w27);
+  ASSERT_EQUAL_32(0x00321000, w28);
+
+  ASSERT_EQUAL_64(0x8000000000000000L, x15);
+  ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
+  ASSERT_EQUAL_64(0x01234567L, x17);
+  ASSERT_EQUAL_64(0xcdef00L, x18);
+  ASSERT_EQUAL_64(0xabcdL, x19);
+  ASSERT_EQUAL_64(0xefL, x20);
+  ASSERT_EQUAL_64(0xcdefL, x21);
+  ASSERT_EQUAL_64(0x89abcdefL, x22);
+
+  TEARDOWN();
+}
+
+
+// Test Extr (extract register pair at a bit position) and its Ror
+// (immediate rotate) alias, in both W and X forms.
+TEST(extr) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x1, 0x0123456789abcdefL);
+  __ Mov(x2, 0xfedcba9876543210L);
+
+  __ Extr(w10, w1, w2, 0);
+  __ Extr(w11, w1, w2, 1);
+  __ Extr(x12, x2, x1, 2);
+
+  __ Ror(w13, w1, 0);
+  __ Ror(w14, w2, 17);
+  __ Ror(w15, w1, 31);
+  __ Ror(x18, x2, 1);
+  __ Ror(x19, x1, 63);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x76543210, x10);
+  ASSERT_EQUAL_64(0xbb2a1908, x11);
+  ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
+  ASSERT_EQUAL_64(0x89abcdef, x13);
+  ASSERT_EQUAL_64(0x19083b2a, x14);
+  ASSERT_EQUAL_64(0x13579bdf, x15);
+  ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
+  ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
+
+  TEARDOWN();
+}
+
+
+// Test Fmov with floating-point immediates, including values that fit the
+// FP-immediate encoding, zero, and the infinities.
+TEST(fmov_imm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s11, 1.0);
+  __ Fmov(d22, -13.0);
+  __ Fmov(s1, 255.0);
+  __ Fmov(d2, 12.34567);
+  __ Fmov(s3, 0.0);
+  __ Fmov(d4, 0.0);
+  __ Fmov(s5, kFP32PositiveInfinity);
+  __ Fmov(d6, kFP64NegativeInfinity);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s11);
+  ASSERT_EQUAL_FP64(-13.0, d22);
+  ASSERT_EQUAL_FP32(255.0, s1);
+  ASSERT_EQUAL_FP64(12.34567, d2);
+  ASSERT_EQUAL_FP32(0.0, s3);
+  ASSERT_EQUAL_FP64(0.0, d4);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
+
+  TEARDOWN();
+}
+
+
+// Test register-to-register Fmov in all directions: FP<->integer bit moves
+// (s<->w, d<->x), FP-to-FP copies, and an FP register moved onto itself.
+TEST(fmov_reg) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s20, 1.0);
+  __ Fmov(w10, s20);
+  __ Fmov(s30, w10);
+  __ Fmov(s5, s20);
+  __ Fmov(d1, -13.0);
+  __ Fmov(x1, d1);
+  __ Fmov(d2, x1);
+  __ Fmov(d4, d1);
+  __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
+  // S-form self-move: s6 keeps the low 32 bits of d6's payload.
+  __ Fmov(s6, s6);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
+  ASSERT_EQUAL_FP32(1.0, s30);
+  ASSERT_EQUAL_FP32(1.0, s5);
+  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
+  ASSERT_EQUAL_FP64(-13.0, d2);
+  ASSERT_EQUAL_FP64(-13.0, d4);
+  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
+
+  TEARDOWN();
+}
+
+
+// Test Fadd in single and double precision, covering ordinary values,
+// signed zero, infinities, and the NaN produced by (+inf) + (-inf).
+TEST(fadd) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s14, -0.0f);
+  __ Fmov(s15, kFP32PositiveInfinity);
+  __ Fmov(s16, kFP32NegativeInfinity);
+  __ Fmov(s17, 3.25f);
+  __ Fmov(s18, 1.0f);
+  __ Fmov(s19, 0.0f);
+
+  __ Fmov(d26, -0.0);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0.0);
+  __ Fmov(d30, -2.0);
+  __ Fmov(d31, 2.25);
+
+  __ Fadd(s0, s17, s18);
+  __ Fadd(s1, s18, s19);
+  __ Fadd(s2, s14, s18);
+  __ Fadd(s3, s15, s18);
+  __ Fadd(s4, s16, s18);
+  __ Fadd(s5, s15, s16);
+  __ Fadd(s6, s16, s15);
+
+  __ Fadd(d7, d30, d31);
+  __ Fadd(d8, d29, d31);
+  __ Fadd(d9, d26, d31);
+  __ Fadd(d10, d27, d31);
+  __ Fadd(d11, d28, d31);
+  __ Fadd(d12, d27, d28);
+  __ Fadd(d13, d28, d27);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(4.25, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(1.0, s2);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+  // Adding infinities of opposite sign yields the default NaN.
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+  ASSERT_EQUAL_FP64(0.25, d7);
+  ASSERT_EQUAL_FP64(2.25, d8);
+  ASSERT_EQUAL_FP64(2.25, d9);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+  TEARDOWN();
+}
+
+
+// Test Fsub in single and double precision, covering ordinary values,
+// signed zero, infinities, and the NaN produced by inf - inf.
+TEST(fsub) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s14, -0.0f);
+  __ Fmov(s15, kFP32PositiveInfinity);
+  __ Fmov(s16, kFP32NegativeInfinity);
+  __ Fmov(s17, 3.25f);
+  __ Fmov(s18, 1.0f);
+  __ Fmov(s19, 0.0f);
+
+  __ Fmov(d26, -0.0);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0.0);
+  __ Fmov(d30, -2.0);
+  __ Fmov(d31, 2.25);
+
+  __ Fsub(s0, s17, s18);
+  __ Fsub(s1, s18, s19);
+  __ Fsub(s2, s14, s18);
+  __ Fsub(s3, s18, s15);
+  __ Fsub(s4, s18, s16);
+  __ Fsub(s5, s15, s15);
+  __ Fsub(s6, s16, s16);
+
+  __ Fsub(d7, d30, d31);
+  __ Fsub(d8, d29, d31);
+  __ Fsub(d9, d26, d31);
+  __ Fsub(d10, d31, d27);
+  __ Fsub(d11, d31, d28);
+  __ Fsub(d12, d27, d27);
+  __ Fsub(d13, d28, d28);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(2.25, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(-1.0, s2);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+  // Subtracting an infinity from itself yields the default NaN.
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+  ASSERT_EQUAL_FP64(-4.25, d7);
+  ASSERT_EQUAL_FP64(-2.25, d8);
+  ASSERT_EQUAL_FP64(-2.25, d9);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+  TEARDOWN();
+}
+
+
+// Test Fmul in single and double precision, covering ordinary values,
+// signed zero, infinities, and the NaN produced by 0 * inf.
+TEST(fmul) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s14, -0.0f);
+  __ Fmov(s15, kFP32PositiveInfinity);
+  __ Fmov(s16, kFP32NegativeInfinity);
+  __ Fmov(s17, 3.25f);
+  __ Fmov(s18, 2.0f);
+  __ Fmov(s19, 0.0f);
+  __ Fmov(s20, -2.0f);
+
+  __ Fmov(d26, -0.0);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0.0);
+  __ Fmov(d30, -2.0);
+  __ Fmov(d31, 2.25);
+
+  __ Fmul(s0, s17, s18);
+  __ Fmul(s1, s18, s19);
+  __ Fmul(s2, s14, s14);
+  __ Fmul(s3, s15, s20);
+  __ Fmul(s4, s16, s20);
+  __ Fmul(s5, s15, s19);
+  __ Fmul(s6, s19, s16);
+
+  __ Fmul(d7, d30, d31);
+  __ Fmul(d8, d29, d31);
+  __ Fmul(d9, d26, d26);
+  __ Fmul(d10, d27, d30);
+  __ Fmul(d11, d28, d30);
+  __ Fmul(d12, d27, d29);
+  __ Fmul(d13, d29, d28);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(6.5, s0);
+  ASSERT_EQUAL_FP32(0.0, s1);
+  ASSERT_EQUAL_FP32(0.0, s2);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+  // Multiplying zero by an infinity yields the default NaN.
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+  ASSERT_EQUAL_FP64(-4.5, d7);
+  ASSERT_EQUAL_FP64(0.0, d8);
+  ASSERT_EQUAL_FP64(0.0, d9);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+  TEARDOWN();
+}
+
+
+// Run the four double-precision fused multiply-add variants on (n, m, a)
+// and check each against its caller-supplied expected result:
+//   Fmadd  =   a + n*m      Fmsub  =   a - n*m
+//   Fnmadd = -(a + n*m)     Fnmsub = -(a - n*m)
+static void FmaddFmsubHelper(double n, double m, double a,
+                             double fmadd, double fmsub,
+                             double fnmadd, double fnmsub) {
+  SETUP();
+  START();
+
+  __ Fmov(d0, n);
+  __ Fmov(d1, m);
+  __ Fmov(d2, a);
+  __ Fmadd(d28, d0, d1, d2);
+  __ Fmsub(d29, d0, d1, d2);
+  __ Fnmadd(d30, d0, d1, d2);
+  __ Fnmsub(d31, d0, d1, d2);
+
+  END();
+  RUN();
+
+  ASSERT_EQUAL_FP64(fmadd, d28);
+  ASSERT_EQUAL_FP64(fmsub, d29);
+  ASSERT_EQUAL_FP64(fnmadd, d30);
+  ASSERT_EQUAL_FP64(fnmsub, d31);
+
+  TEARDOWN();
+}
+
+
+// Drive FmaddFmsubHelper(double) through basic arithmetic, the signs of
+// exact zero results, and NaN generation from infinities.
+TEST(fmadd_fmsub_double) {
+  INIT_V8();
+
+  // It's hard to check the result of fused operations because the only way to
+  // calculate the result is using fma, which is what the simulator uses anyway.
+  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
+
+  // Basic operation.
+  FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
+  FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
+
+  // Check the sign of exact zeroes.
+  //               n     m     a     fmadd  fmsub  fnmadd fnmsub
+  FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
+  FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
+  FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
+  FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
+  FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
+  FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
+  FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
+  FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
+
+  // Check NaN generation.
+  FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
+                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
+                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
+                   kFP64NegativeInfinity,   // -inf + (-inf * 1) = -inf
+                   kFP64DefaultNaN);        // -inf + ( inf * 1) = NaN
+  FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
+                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
+                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
+                   kFP64DefaultNaN,         // -inf + ( inf * 1) = NaN
+                   kFP64NegativeInfinity);  // -inf + (-inf * 1) = -inf
+}
+
+
+// Single-precision variant of FmaddFmsubHelper above: run the four fused
+// multiply ops on (n, m, a) and check each result.
+static void FmaddFmsubHelper(float n, float m, float a,
+                             float fmadd, float fmsub,
+                             float fnmadd, float fnmsub) {
+  SETUP();
+  START();
+
+  __ Fmov(s0, n);
+  __ Fmov(s1, m);
+  __ Fmov(s2, a);
+  __ Fmadd(s28, s0, s1, s2);
+  __ Fmsub(s29, s0, s1, s2);
+  __ Fnmadd(s30, s0, s1, s2);
+  __ Fnmsub(s31, s0, s1, s2);
+
+  END();
+  RUN();
+
+  ASSERT_EQUAL_FP32(fmadd, s28);
+  ASSERT_EQUAL_FP32(fmsub, s29);
+  ASSERT_EQUAL_FP32(fnmadd, s30);
+  ASSERT_EQUAL_FP32(fnmsub, s31);
+
+  TEARDOWN();
+}
+
+
+// Single-precision counterpart of fmadd_fmsub_double: basic arithmetic,
+// signs of exact zero results, and NaN generation.
+TEST(fmadd_fmsub_float) {
+  INIT_V8();
+  // It's hard to check the result of fused operations because the only way to
+  // calculate the result is using fma, which is what the simulator uses anyway.
+  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
+
+  // Basic operation.
+  FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
+  FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
+
+  // Check the sign of exact zeroes.
+  //               n      m      a      fmadd  fmsub  fnmadd fnmsub
+  FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+  FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+  FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+  FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+  FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+  FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+  FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+  FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+
+  // Check NaN generation.
+  FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
+                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
+                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
+                   kFP32NegativeInfinity,   // -inf + (-inf * 1) = -inf
+                   kFP32DefaultNaN);        // -inf + ( inf * 1) = NaN
+  FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
+                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
+                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
+                   kFP32DefaultNaN,         // -inf + ( inf * 1) = NaN
+                   kFP32NegativeInfinity);  // -inf + (-inf * 1) = -inf
+}
+
+
+// Check NaN propagation through the double-precision fused multiply ops:
+// quiet NaNs propagate (with sign flips where the operand is negated),
+// signalling NaNs are quietened and take precedence over quiet NaNs, and an
+// invalid intermediate (inf * 0) overrides a quiet NaN in the addend.
+TEST(fmadd_fmsub_double_nans) {
+  INIT_V8();
+  // Make sure that NaN propagation works correctly.
+  double s1 = rawbits_to_double(0x7ff5555511111111);
+  double s2 = rawbits_to_double(0x7ff5555522222222);
+  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
+  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
+  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+  ASSERT(IsSignallingNaN(s1));
+  ASSERT(IsSignallingNaN(s2));
+  ASSERT(IsSignallingNaN(sa));
+  ASSERT(IsQuietNaN(q1));
+  ASSERT(IsQuietNaN(q2));
+  ASSERT(IsQuietNaN(qa));
+
+  // The input NaNs after passing through ProcessNaN.
+  // Signalling NaNs come out with the quiet bit set; quiet NaNs are unchanged.
+  double s1_proc = rawbits_to_double(0x7ffd555511111111);
+  double s2_proc = rawbits_to_double(0x7ffd555522222222);
+  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
+  double q1_proc = q1;
+  double q2_proc = q2;
+  double qa_proc = qa;
+  ASSERT(IsQuietNaN(s1_proc));
+  ASSERT(IsQuietNaN(s2_proc));
+  ASSERT(IsQuietNaN(sa_proc));
+  ASSERT(IsQuietNaN(q1_proc));
+  ASSERT(IsQuietNaN(q2_proc));
+  ASSERT(IsQuietNaN(qa_proc));
+
+  // Negated NaNs as it would be done on ARMv8 hardware.
+  double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
+  double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
+  double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
+  double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
+  ASSERT(IsQuietNaN(s1_proc_neg));
+  ASSERT(IsQuietNaN(sa_proc_neg));
+  ASSERT(IsQuietNaN(q1_proc_neg));
+  ASSERT(IsQuietNaN(qa_proc_neg));
+
+  // Quiet NaNs are propagated.
+  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+
+  // Signalling NaNs are propagated, and made quiet.
+  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+  // Signalling NaNs take precedence over quiet NaNs.
+  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+}
+
+
+// Single-precision counterpart of fmadd_fmsub_double_nans: NaN propagation,
+// quietening of signalling NaNs, and precedence rules through the fused ops.
+TEST(fmadd_fmsub_float_nans) {
+  INIT_V8();
+  // Make sure that NaN propagation works correctly.
+  float s1 = rawbits_to_float(0x7f951111);
+  float s2 = rawbits_to_float(0x7f952222);
+  float sa = rawbits_to_float(0x7f95aaaa);
+  float q1 = rawbits_to_float(0x7fea1111);
+  float q2 = rawbits_to_float(0x7fea2222);
+  float qa = rawbits_to_float(0x7feaaaaa);
+  ASSERT(IsSignallingNaN(s1));
+  ASSERT(IsSignallingNaN(s2));
+  ASSERT(IsSignallingNaN(sa));
+  ASSERT(IsQuietNaN(q1));
+  ASSERT(IsQuietNaN(q2));
+  ASSERT(IsQuietNaN(qa));
+
+  // The input NaNs after passing through ProcessNaN.
+  // Signalling NaNs come out with the quiet bit set; quiet NaNs are unchanged.
+  float s1_proc = rawbits_to_float(0x7fd51111);
+  float s2_proc = rawbits_to_float(0x7fd52222);
+  float sa_proc = rawbits_to_float(0x7fd5aaaa);
+  float q1_proc = q1;
+  float q2_proc = q2;
+  float qa_proc = qa;
+  ASSERT(IsQuietNaN(s1_proc));
+  ASSERT(IsQuietNaN(s2_proc));
+  ASSERT(IsQuietNaN(sa_proc));
+  ASSERT(IsQuietNaN(q1_proc));
+  ASSERT(IsQuietNaN(q2_proc));
+  ASSERT(IsQuietNaN(qa_proc));
+
+  // Negated NaNs as it would be done on ARMv8 hardware.
+  float s1_proc_neg = rawbits_to_float(0xffd51111);
+  float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
+  float q1_proc_neg = rawbits_to_float(0xffea1111);
+  float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
+  ASSERT(IsQuietNaN(s1_proc_neg));
+  ASSERT(IsQuietNaN(sa_proc_neg));
+  ASSERT(IsQuietNaN(q1_proc_neg));
+  ASSERT(IsQuietNaN(qa_proc_neg));
+
+  // Quiet NaNs are propagated.
+  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+
+  // Signalling NaNs are propagated, and made quiet.
+  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+  // Signalling NaNs take precedence over quiet NaNs.
+  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+}
+
+
+// Check Fdiv (single- and double-precision) for ordinary quotients, signed
+// zeroes, division by infinity, and the NaN-generating cases inf/inf and 0/0.
+TEST(fdiv) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s14, -0.0f);
+  __ Fmov(s15, kFP32PositiveInfinity);
+  __ Fmov(s16, kFP32NegativeInfinity);
+  __ Fmov(s17, 3.25f);
+  __ Fmov(s18, 2.0f);
+  __ Fmov(s19, 2.0f);
+  __ Fmov(s20, -2.0f);
+
+  __ Fmov(d26, -0.0);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0.0);
+  __ Fmov(d30, -2.0);
+  __ Fmov(d31, 2.25);
+
+  __ Fdiv(s0, s17, s18);
+  __ Fdiv(s1, s18, s19);
+  __ Fdiv(s2, s14, s18);
+  __ Fdiv(s3, s18, s15);
+  __ Fdiv(s4, s18, s16);
+  // inf / inf and 0 / 0 must produce the default NaN.
+  __ Fdiv(s5, s15, s16);
+  __ Fdiv(s6, s14, s14);
+
+  __ Fdiv(d7, d31, d30);
+  __ Fdiv(d8, d29, d31);
+  __ Fdiv(d9, d26, d31);
+  __ Fdiv(d10, d31, d27);
+  __ Fdiv(d11, d31, d28);
+  __ Fdiv(d12, d28, d27);
+  __ Fdiv(d13, d29, d29);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.625f, s0);
+  ASSERT_EQUAL_FP32(1.0f, s1);
+  ASSERT_EQUAL_FP32(-0.0f, s2);
+  ASSERT_EQUAL_FP32(0.0f, s3);
+  ASSERT_EQUAL_FP32(-0.0f, s4);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+  ASSERT_EQUAL_FP64(-1.125, d7);
+  ASSERT_EQUAL_FP64(0.0, d8);
+  ASSERT_EQUAL_FP64(-0.0, d9);
+  ASSERT_EQUAL_FP64(0.0, d10);
+  ASSERT_EQUAL_FP64(-0.0, d11);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+  TEARDOWN();
+}
+
+
+// Reference model for Fmin/Fmax (min == true selects min) and, when
+// quiet_nan_substitute is non-zero, for Fminnm/Fmaxnm: signalling NaNs are
+// quietened and propagated; quiet NaNs either propagate (Fmin/Fmax) or are
+// replaced by the substitute so the numeric operand wins (Fminnm/Fmaxnm).
+// Zeroes of opposite sign resolve to -0.0 for min and +0.0 for max.
+static float MinMaxHelper(float n,
+                          float m,
+                          bool min,
+                          float quiet_nan_substitute = 0.0) {
+  uint32_t raw_n = float_to_rawbits(n);
+  uint32_t raw_m = float_to_rawbits(m);
+
+  if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
+    // n is signalling NaN.
+    return rawbits_to_float(raw_n | kSQuietNanMask);
+  } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
+    // m is signalling NaN.
+    return rawbits_to_float(raw_m | kSQuietNanMask);
+  } else if (quiet_nan_substitute == 0.0) {
+    if (std::isnan(n)) {
+      // n is quiet NaN.
+      return n;
+    } else if (std::isnan(m)) {
+      // m is quiet NaN.
+      return m;
+    }
+  } else {
+    // Substitute n or m if one is quiet, but not both.
+    if (std::isnan(n) && !std::isnan(m)) {
+      // n is quiet NaN: replace with substitute.
+      n = quiet_nan_substitute;
+    } else if (!std::isnan(n) && std::isnan(m)) {
+      // m is quiet NaN: replace with substitute.
+      m = quiet_nan_substitute;
+    }
+  }
+
+  // fminf/fmaxf do not distinguish +0.0 from -0.0, so handle that case here.
+  if ((n == 0.0) && (m == 0.0) &&
+      (copysign(1.0, n) != copysign(1.0, m))) {
+    return min ? -0.0 : 0.0;
+  }
+
+  return min ? fminf(n, m) : fmaxf(n, m);
+}
+
+
+// Double-precision variant of MinMaxHelper above; same NaN and signed-zero
+// rules, using fmin/fmax for the numeric case.
+static double MinMaxHelper(double n,
+                           double m,
+                           bool min,
+                           double quiet_nan_substitute = 0.0) {
+  uint64_t raw_n = double_to_rawbits(n);
+  uint64_t raw_m = double_to_rawbits(m);
+
+  if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
+    // n is signalling NaN.
+    return rawbits_to_double(raw_n | kDQuietNanMask);
+  } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
+    // m is signalling NaN.
+    return rawbits_to_double(raw_m | kDQuietNanMask);
+  } else if (quiet_nan_substitute == 0.0) {
+    if (std::isnan(n)) {
+      // n is quiet NaN.
+      return n;
+    } else if (std::isnan(m)) {
+      // m is quiet NaN.
+      return m;
+    }
+  } else {
+    // Substitute n or m if one is quiet, but not both.
+    if (std::isnan(n) && !std::isnan(m)) {
+      // n is quiet NaN: replace with substitute.
+      n = quiet_nan_substitute;
+    } else if (!std::isnan(n) && std::isnan(m)) {
+      // m is quiet NaN: replace with substitute.
+      m = quiet_nan_substitute;
+    }
+  }
+
+  // fmin/fmax do not distinguish +0.0 from -0.0, so handle that case here.
+  if ((n == 0.0) && (m == 0.0) &&
+      (copysign(1.0, n) != copysign(1.0, m))) {
+    return min ? -0.0 : 0.0;
+  }
+
+  return min ? fmin(n, m) : fmax(n, m);
+}
+
+
+// Run Fmin/Fmax/Fminnm/Fmaxnm (double) on (n, m) and check each result
+// against the corresponding expected value.
+static void FminFmaxDoubleHelper(double n, double m, double min, double max,
+                                 double minnm, double maxnm) {
+  SETUP();
+
+  START();
+  __ Fmov(d0, n);
+  __ Fmov(d1, m);
+  __ Fmin(d28, d0, d1);
+  __ Fmax(d29, d0, d1);
+  __ Fminnm(d30, d0, d1);
+  __ Fmaxnm(d31, d0, d1);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP64(min, d28);
+  ASSERT_EQUAL_FP64(max, d29);
+  ASSERT_EQUAL_FP64(minnm, d30);
+  ASSERT_EQUAL_FP64(maxnm, d31);
+
+  TEARDOWN();
+}
+
+
+// Check Fmin/Fmax/Fminnm/Fmaxnm (double): hand-picked NaN cases first, then
+// an exhaustive sweep of all pairs from a representative input set, validated
+// against the MinMaxHelper reference model.
+TEST(fmax_fmin_d) {
+  INIT_V8();
+  // Use non-standard NaNs to check that the payload bits are preserved.
+  double snan = rawbits_to_double(0x7ff5555512345678);
+  double qnan = rawbits_to_double(0x7ffaaaaa87654321);
+
+  double snan_processed = rawbits_to_double(0x7ffd555512345678);
+  double qnan_processed = qnan;
+
+  ASSERT(IsSignallingNaN(snan));
+  ASSERT(IsQuietNaN(qnan));
+  ASSERT(IsQuietNaN(snan_processed));
+  ASSERT(IsQuietNaN(qnan_processed));
+
+  // Bootstrap tests.
+  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
+  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
+  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
+                       kFP64NegativeInfinity, kFP64PositiveInfinity,
+                       kFP64NegativeInfinity, kFP64PositiveInfinity);
+  FminFmaxDoubleHelper(snan, 0,
+                       snan_processed, snan_processed,
+                       snan_processed, snan_processed);
+  FminFmaxDoubleHelper(0, snan,
+                       snan_processed, snan_processed,
+                       snan_processed, snan_processed);
+  // Fminnm/Fmaxnm prefer the numeric operand over a quiet NaN.
+  FminFmaxDoubleHelper(qnan, 0,
+                       qnan_processed, qnan_processed,
+                       0, 0);
+  FminFmaxDoubleHelper(0, qnan,
+                       qnan_processed, qnan_processed,
+                       0, 0);
+  FminFmaxDoubleHelper(qnan, snan,
+                       snan_processed, snan_processed,
+                       snan_processed, snan_processed);
+  FminFmaxDoubleHelper(snan, qnan,
+                       snan_processed, snan_processed,
+                       snan_processed, snan_processed);
+
+  // Iterate over all combinations of inputs.
+  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
+                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
+                      kFP64PositiveInfinity, kFP64NegativeInfinity,
+                      kFP64QuietNaN, kFP64SignallingNaN };
+
+  const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+  for (int in = 0; in < count; in++) {
+    double n = inputs[in];
+    for (int im = 0; im < count; im++) {
+      double m = inputs[im];
+      FminFmaxDoubleHelper(n, m,
+                           MinMaxHelper(n, m, true),
+                           MinMaxHelper(n, m, false),
+                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
+                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
+    }
+  }
+}
+
+
+// Single-precision variant of FminFmaxDoubleHelper: run Fmin/Fmax/Fminnm/
+// Fmaxnm on (n, m) and check each result.
+static void FminFmaxFloatHelper(float n, float m, float min, float max,
+                                float minnm, float maxnm) {
+  SETUP();
+
+  START();
+  __ Fmov(s0, n);
+  __ Fmov(s1, m);
+  __ Fmin(s28, s0, s1);
+  __ Fmax(s29, s0, s1);
+  __ Fminnm(s30, s0, s1);
+  __ Fmaxnm(s31, s0, s1);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(min, s28);
+  ASSERT_EQUAL_FP32(max, s29);
+  ASSERT_EQUAL_FP32(minnm, s30);
+  ASSERT_EQUAL_FP32(maxnm, s31);
+
+  TEARDOWN();
+}
+
+
+// Single-precision counterpart of fmax_fmin_d: hand-picked NaN cases, then
+// an exhaustive pair sweep validated against the MinMaxHelper model.
+TEST(fmax_fmin_s) {
+  INIT_V8();
+  // Use non-standard NaNs to check that the payload bits are preserved.
+  float snan = rawbits_to_float(0x7f951234);
+  float qnan = rawbits_to_float(0x7fea8765);
+
+  float snan_processed = rawbits_to_float(0x7fd51234);
+  float qnan_processed = qnan;
+
+  ASSERT(IsSignallingNaN(snan));
+  ASSERT(IsQuietNaN(qnan));
+  ASSERT(IsQuietNaN(snan_processed));
+  ASSERT(IsQuietNaN(qnan_processed));
+
+  // Bootstrap tests.
+  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
+  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
+  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
+                      kFP32NegativeInfinity, kFP32PositiveInfinity,
+                      kFP32NegativeInfinity, kFP32PositiveInfinity);
+  FminFmaxFloatHelper(snan, 0,
+                      snan_processed, snan_processed,
+                      snan_processed, snan_processed);
+  FminFmaxFloatHelper(0, snan,
+                      snan_processed, snan_processed,
+                      snan_processed, snan_processed);
+  // Fminnm/Fmaxnm prefer the numeric operand over a quiet NaN.
+  FminFmaxFloatHelper(qnan, 0,
+                      qnan_processed, qnan_processed,
+                      0, 0);
+  FminFmaxFloatHelper(0, qnan,
+                      qnan_processed, qnan_processed,
+                      0, 0);
+  FminFmaxFloatHelper(qnan, snan,
+                      snan_processed, snan_processed,
+                      snan_processed, snan_processed);
+  FminFmaxFloatHelper(snan, qnan,
+                      snan_processed, snan_processed,
+                      snan_processed, snan_processed);
+
+  // Iterate over all combinations of inputs.
+  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
+                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
+                     kFP32PositiveInfinity, kFP32NegativeInfinity,
+                     kFP32QuietNaN, kFP32SignallingNaN };
+
+  const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+  for (int in = 0; in < count; in++) {
+    float n = inputs[in];
+    for (int im = 0; im < count; im++) {
+      float m = inputs[im];
+      FminFmaxFloatHelper(n, m,
+                          MinMaxHelper(n, m, true),
+                          MinMaxHelper(n, m, false),
+                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
+                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
+    }
+  }
+}
+
+
+// Check Fccmp: when the condition holds the NZCV flags reflect the FP
+// comparison; when it fails the supplied flag literal is written instead.
+// The raw fccmp assembler entry is used for the al/nv conditions, which the
+// Fccmp macro does not accept.
+TEST(fccmp) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 0.0);
+  __ Fmov(s17, 0.5);
+  __ Fmov(d18, -0.5);
+  __ Fmov(d19, -1.0);
+  __ Mov(x20, 0);
+
+  // x20 == 0, so Cmp sets Z: conditions eq/ge/le/ls hold, ne/lt/gt/hi fail.
+  __ Cmp(x20, 0);
+  __ Fccmp(s16, s16, NoFlag, eq);
+  __ Mrs(x0, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(s16, s16, VFlag, ne);
+  __ Mrs(x1, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(s16, s17, CFlag, ge);
+  __ Mrs(x2, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(s16, s17, CVFlag, lt);
+  __ Mrs(x3, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(d18, d18, ZFlag, le);
+  __ Mrs(x4, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(d18, d18, ZVFlag, gt);
+  __ Mrs(x5, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(d18, d19, ZCVFlag, ls);
+  __ Mrs(x6, NZCV);
+
+  __ Cmp(x20, 0);
+  __ Fccmp(d18, d19, NFlag, hi);
+  __ Mrs(x7, NZCV);
+
+  // al/nv always perform the comparison; the flag literal is never used.
+  __ fccmp(s16, s16, NFlag, al);
+  __ Mrs(x8, NZCV);
+
+  __ fccmp(d18, d18, NFlag, nv);
+  __ Mrs(x9, NZCV);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(ZCFlag, w0);
+  ASSERT_EQUAL_32(VFlag, w1);
+  ASSERT_EQUAL_32(NFlag, w2);
+  ASSERT_EQUAL_32(CVFlag, w3);
+  ASSERT_EQUAL_32(ZCFlag, w4);
+  ASSERT_EQUAL_32(ZVFlag, w5);
+  ASSERT_EQUAL_32(CFlag, w6);
+  ASSERT_EQUAL_32(NFlag, w7);
+  ASSERT_EQUAL_32(ZCFlag, w8);
+  ASSERT_EQUAL_32(ZCFlag, w9);
+
+  TEARDOWN();
+}
+
+
+// Check Fcmp (register-register and register-immediate forms) for equal,
+// less-than, greater-than and unordered (NaN) operands, in both precisions.
+// Comparisons against a non-zero immediate need an FP scratch register, so
+// the FPTmpList is toggled around those to verify both configurations.
+TEST(fcmp) {
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  // Some of these tests require a floating-point scratch register assigned to
+  // the macro assembler, but most do not.
+  {
+    // We're going to mess around with the available scratch registers in this
+    // test. A UseScratchRegisterScope will make sure that they are restored to
+    // the default values once we're finished.
+    UseScratchRegisterScope temps(&masm);
+    masm.FPTmpList()->set_list(0);
+
+    __ Fmov(s8, 0.0);
+    __ Fmov(s9, 0.5);
+    __ Mov(w18, 0x7f800001);  // Single precision NaN.
+    __ Fmov(s18, w18);
+
+    __ Fcmp(s8, s8);
+    __ Mrs(x0, NZCV);
+    __ Fcmp(s8, s9);
+    __ Mrs(x1, NZCV);
+    __ Fcmp(s9, s8);
+    __ Mrs(x2, NZCV);
+    __ Fcmp(s8, s18);
+    __ Mrs(x3, NZCV);
+    __ Fcmp(s18, s18);
+    __ Mrs(x4, NZCV);
+    // Comparison against 0.0 has a dedicated encoding; no scratch needed.
+    __ Fcmp(s8, 0.0);
+    __ Mrs(x5, NZCV);
+    // A non-zero immediate must be materialized in an FP scratch register.
+    masm.FPTmpList()->set_list(d0.Bit());
+    __ Fcmp(s8, 255.0);
+    masm.FPTmpList()->set_list(0);
+    __ Mrs(x6, NZCV);
+
+    __ Fmov(d19, 0.0);
+    __ Fmov(d20, 0.5);
+    __ Mov(x21, 0x7ff0000000000001UL);  // Double precision NaN.
+    __ Fmov(d21, x21);
+
+    __ Fcmp(d19, d19);
+    __ Mrs(x10, NZCV);
+    __ Fcmp(d19, d20);
+    __ Mrs(x11, NZCV);
+    __ Fcmp(d20, d19);
+    __ Mrs(x12, NZCV);
+    __ Fcmp(d19, d21);
+    __ Mrs(x13, NZCV);
+    __ Fcmp(d21, d21);
+    __ Mrs(x14, NZCV);
+    __ Fcmp(d19, 0.0);
+    __ Mrs(x15, NZCV);
+    masm.FPTmpList()->set_list(d0.Bit());
+    __ Fcmp(d19, 12.3456);
+    masm.FPTmpList()->set_list(0);
+    __ Mrs(x16, NZCV);
+  }
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_32(ZCFlag, w0);
+  ASSERT_EQUAL_32(NFlag, w1);
+  ASSERT_EQUAL_32(CFlag, w2);
+  ASSERT_EQUAL_32(CVFlag, w3);
+  ASSERT_EQUAL_32(CVFlag, w4);
+  ASSERT_EQUAL_32(ZCFlag, w5);
+  ASSERT_EQUAL_32(NFlag, w6);
+  ASSERT_EQUAL_32(ZCFlag, w10);
+  ASSERT_EQUAL_32(NFlag, w11);
+  ASSERT_EQUAL_32(CFlag, w12);
+  ASSERT_EQUAL_32(CVFlag, w13);
+  ASSERT_EQUAL_32(CVFlag, w14);
+  ASSERT_EQUAL_32(ZCFlag, w15);
+  ASSERT_EQUAL_32(NFlag, w16);
+
+  TEARDOWN();
+}
+
+
+// Check Fcsel: selects the first operand when the condition holds, the second
+// otherwise. The raw fcsel assembler entry is used for al/nv (which always
+// select the first operand), since the macro does not accept them.
+TEST(fcsel) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x16, 0);
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 2.0);
+  __ Fmov(d18, 3.0);
+  __ Fmov(d19, 4.0);
+
+  // x16 == 0, so eq holds and ne fails.
+  __ Cmp(x16, 0);
+  __ Fcsel(s0, s16, s17, eq);
+  __ Fcsel(s1, s16, s17, ne);
+  __ Fcsel(d2, d18, d19, eq);
+  __ Fcsel(d3, d18, d19, ne);
+  __ fcsel(s4, s16, s17, al);
+  __ fcsel(d5, d18, d19, nv);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(2.0, s1);
+  ASSERT_EQUAL_FP64(3.0, d2);
+  ASSERT_EQUAL_FP64(4.0, d3);
+  ASSERT_EQUAL_FP32(1.0, s4);
+  ASSERT_EQUAL_FP64(3.0, d5);
+
+  TEARDOWN();
+}
+
+
+// Check Fneg: the sign bit is flipped for finite values, zeroes and
+// infinities, and double negation restores the original value.
+TEST(fneg) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 0.0);
+  __ Fmov(s18, kFP32PositiveInfinity);
+  __ Fmov(d19, 1.0);
+  __ Fmov(d20, 0.0);
+  __ Fmov(d21, kFP64PositiveInfinity);
+
+  __ Fneg(s0, s16);
+  __ Fneg(s1, s0);
+  __ Fneg(s2, s17);
+  __ Fneg(s3, s2);
+  __ Fneg(s4, s18);
+  __ Fneg(s5, s4);
+  __ Fneg(d6, d19);
+  __ Fneg(d7, d6);
+  __ Fneg(d8, d20);
+  __ Fneg(d9, d8);
+  __ Fneg(d10, d21);
+  __ Fneg(d11, d10);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(-1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(-0.0, s2);
+  ASSERT_EQUAL_FP32(0.0, s3);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  ASSERT_EQUAL_FP64(-1.0, d6);
+  ASSERT_EQUAL_FP64(1.0, d7);
+  ASSERT_EQUAL_FP64(-0.0, d8);
+  ASSERT_EQUAL_FP64(0.0, d9);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+  TEARDOWN();
+}
+
+
+// Check Fabs: the sign bit is cleared for negative values, -0.0 and -inf,
+// and already-positive values are left unchanged.
+TEST(fabs) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, -1.0);
+  __ Fmov(s17, -0.0);
+  __ Fmov(s18, kFP32NegativeInfinity);
+  __ Fmov(d19, -1.0);
+  __ Fmov(d20, -0.0);
+  __ Fmov(d21, kFP64NegativeInfinity);
+
+  __ Fabs(s0, s16);
+  __ Fabs(s1, s0);
+  __ Fabs(s2, s17);
+  __ Fabs(s3, s18);
+  __ Fabs(d4, d19);
+  __ Fabs(d5, d4);
+  __ Fabs(d6, d20);
+  __ Fabs(d7, d21);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(0.0, s2);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+  ASSERT_EQUAL_FP64(1.0, d4);
+  ASSERT_EQUAL_FP64(1.0, d5);
+  ASSERT_EQUAL_FP64(0.0, d6);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+
+  TEARDOWN();
+}
+
+
+// Check Fsqrt: exact roots, signed zero (sqrt(-0.0) == -0.0), infinity, and
+// NaN generation for negative inputs.
+TEST(fsqrt) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 0.0);
+  __ Fmov(s17, 1.0);
+  __ Fmov(s18, 0.25);
+  __ Fmov(s19, 65536.0);
+  __ Fmov(s20, -0.0);
+  __ Fmov(s21, kFP32PositiveInfinity);
+  __ Fmov(s22, -1.0);
+  __ Fmov(d23, 0.0);
+  __ Fmov(d24, 1.0);
+  __ Fmov(d25, 0.25);
+  __ Fmov(d26, 4294967296.0);
+  __ Fmov(d27, -0.0);
+  __ Fmov(d28, kFP64PositiveInfinity);
+  __ Fmov(d29, -1.0);
+
+  __ Fsqrt(s0, s16);
+  __ Fsqrt(s1, s17);
+  __ Fsqrt(s2, s18);
+  __ Fsqrt(s3, s19);
+  __ Fsqrt(s4, s20);
+  __ Fsqrt(s5, s21);
+  // sqrt of a negative number must produce the default NaN.
+  __ Fsqrt(s6, s22);
+  __ Fsqrt(d7, d23);
+  __ Fsqrt(d8, d24);
+  __ Fsqrt(d9, d25);
+  __ Fsqrt(d10, d26);
+  __ Fsqrt(d11, d27);
+  __ Fsqrt(d12, d28);
+  __ Fsqrt(d13, d29);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(0.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(0.5, s2);
+  ASSERT_EQUAL_FP32(256.0, s3);
+  ASSERT_EQUAL_FP32(-0.0, s4);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+  ASSERT_EQUAL_FP64(0.0, d7);
+  ASSERT_EQUAL_FP64(1.0, d8);
+  ASSERT_EQUAL_FP64(0.5, d9);
+  ASSERT_EQUAL_FP64(65536.0, d10);
+  ASSERT_EQUAL_FP64(-0.0, d11);
+  // Use the double-precision constant for a double-precision check; the
+  // original used kFP32PositiveInfinity here (same value after conversion,
+  // but the wrong-width constant).
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
+  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+  TEARDOWN();
+}
+
+
+// Check Frinta (round to nearest, ties away from zero): 2.5 -> 3, -2.5 -> -3,
+// and infinities and signed zeroes pass through unchanged.
+TEST(frinta) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, 1.9);
+  __ Fmov(s20, 2.5);
+  __ Fmov(s21, -1.5);
+  __ Fmov(s22, -2.5);
+  __ Fmov(s23, kFP32PositiveInfinity);
+  __ Fmov(s24, kFP32NegativeInfinity);
+  __ Fmov(s25, 0.0);
+  __ Fmov(s26, -0.0);
+
+  __ Frinta(s0, s16);
+  __ Frinta(s1, s17);
+  __ Frinta(s2, s18);
+  __ Frinta(s3, s19);
+  __ Frinta(s4, s20);
+  __ Frinta(s5, s21);
+  __ Frinta(s6, s22);
+  __ Frinta(s7, s23);
+  __ Frinta(s8, s24);
+  __ Frinta(s9, s25);
+  __ Frinta(s10, s26);
+
+  __ Fmov(d16, 1.0);
+  __ Fmov(d17, 1.1);
+  __ Fmov(d18, 1.5);
+  __ Fmov(d19, 1.9);
+  __ Fmov(d20, 2.5);
+  __ Fmov(d21, -1.5);
+  __ Fmov(d22, -2.5);
+  // Use the FP64 infinity constants for double registers (the original used
+  // the FP32 constants; same value after implicit conversion, but these match
+  // the kFP64* assertions below).
+  __ Fmov(d23, kFP64PositiveInfinity);
+  __ Fmov(d24, kFP64NegativeInfinity);
+  __ Fmov(d25, 0.0);
+  __ Fmov(d26, -0.0);
+
+  __ Frinta(d11, d16);
+  __ Frinta(d12, d17);
+  __ Frinta(d13, d18);
+  __ Frinta(d14, d19);
+  __ Frinta(d15, d20);
+  __ Frinta(d16, d21);
+  __ Frinta(d17, d22);
+  __ Frinta(d18, d23);
+  __ Frinta(d19, d24);
+  __ Frinta(d20, d25);
+  __ Frinta(d21, d26);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(2.0, s2);
+  ASSERT_EQUAL_FP32(2.0, s3);
+  ASSERT_EQUAL_FP32(3.0, s4);
+  ASSERT_EQUAL_FP32(-2.0, s5);
+  ASSERT_EQUAL_FP32(-3.0, s6);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  ASSERT_EQUAL_FP32(0.0, s9);
+  ASSERT_EQUAL_FP32(-0.0, s10);
+  ASSERT_EQUAL_FP64(1.0, d11);
+  ASSERT_EQUAL_FP64(1.0, d12);
+  ASSERT_EQUAL_FP64(2.0, d13);
+  ASSERT_EQUAL_FP64(2.0, d14);
+  ASSERT_EQUAL_FP64(3.0, d15);
+  ASSERT_EQUAL_FP64(-2.0, d16);
+  ASSERT_EQUAL_FP64(-3.0, d17);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+  ASSERT_EQUAL_FP64(0.0, d20);
+  ASSERT_EQUAL_FP64(-0.0, d21);
+
+  TEARDOWN();
+}
+
+
+// Check Frintn (round to nearest, ties to even): 1.5 -> 2 but 2.5 -> 2,
+// and infinities and signed zeroes pass through unchanged.
+TEST(frintn) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, 1.9);
+  __ Fmov(s20, 2.5);
+  __ Fmov(s21, -1.5);
+  __ Fmov(s22, -2.5);
+  __ Fmov(s23, kFP32PositiveInfinity);
+  __ Fmov(s24, kFP32NegativeInfinity);
+  __ Fmov(s25, 0.0);
+  __ Fmov(s26, -0.0);
+
+  __ Frintn(s0, s16);
+  __ Frintn(s1, s17);
+  __ Frintn(s2, s18);
+  __ Frintn(s3, s19);
+  __ Frintn(s4, s20);
+  __ Frintn(s5, s21);
+  __ Frintn(s6, s22);
+  __ Frintn(s7, s23);
+  __ Frintn(s8, s24);
+  __ Frintn(s9, s25);
+  __ Frintn(s10, s26);
+
+  __ Fmov(d16, 1.0);
+  __ Fmov(d17, 1.1);
+  __ Fmov(d18, 1.5);
+  __ Fmov(d19, 1.9);
+  __ Fmov(d20, 2.5);
+  __ Fmov(d21, -1.5);
+  __ Fmov(d22, -2.5);
+  // Use the FP64 infinity constants for double registers (the original used
+  // the FP32 constants; same value after implicit conversion, but these match
+  // the kFP64* assertions below).
+  __ Fmov(d23, kFP64PositiveInfinity);
+  __ Fmov(d24, kFP64NegativeInfinity);
+  __ Fmov(d25, 0.0);
+  __ Fmov(d26, -0.0);
+
+  __ Frintn(d11, d16);
+  __ Frintn(d12, d17);
+  __ Frintn(d13, d18);
+  __ Frintn(d14, d19);
+  __ Frintn(d15, d20);
+  __ Frintn(d16, d21);
+  __ Frintn(d17, d22);
+  __ Frintn(d18, d23);
+  __ Frintn(d19, d24);
+  __ Frintn(d20, d25);
+  __ Frintn(d21, d26);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(2.0, s2);
+  ASSERT_EQUAL_FP32(2.0, s3);
+  ASSERT_EQUAL_FP32(2.0, s4);
+  ASSERT_EQUAL_FP32(-2.0, s5);
+  ASSERT_EQUAL_FP32(-2.0, s6);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  ASSERT_EQUAL_FP32(0.0, s9);
+  ASSERT_EQUAL_FP32(-0.0, s10);
+  ASSERT_EQUAL_FP64(1.0, d11);
+  ASSERT_EQUAL_FP64(1.0, d12);
+  ASSERT_EQUAL_FP64(2.0, d13);
+  ASSERT_EQUAL_FP64(2.0, d14);
+  ASSERT_EQUAL_FP64(2.0, d15);
+  ASSERT_EQUAL_FP64(-2.0, d16);
+  ASSERT_EQUAL_FP64(-2.0, d17);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+  ASSERT_EQUAL_FP64(0.0, d20);
+  ASSERT_EQUAL_FP64(-0.0, d21);
+
+  TEARDOWN();
+}
+
+
+// Check Frintz (round toward zero): fractional parts are truncated in both
+// directions, and infinities and signed zeroes pass through unchanged.
+TEST(frintz) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, 1.9);
+  __ Fmov(s20, 2.5);
+  __ Fmov(s21, -1.5);
+  __ Fmov(s22, -2.5);
+  __ Fmov(s23, kFP32PositiveInfinity);
+  __ Fmov(s24, kFP32NegativeInfinity);
+  __ Fmov(s25, 0.0);
+  __ Fmov(s26, -0.0);
+
+  __ Frintz(s0, s16);
+  __ Frintz(s1, s17);
+  __ Frintz(s2, s18);
+  __ Frintz(s3, s19);
+  __ Frintz(s4, s20);
+  __ Frintz(s5, s21);
+  __ Frintz(s6, s22);
+  __ Frintz(s7, s23);
+  __ Frintz(s8, s24);
+  __ Frintz(s9, s25);
+  __ Frintz(s10, s26);
+
+  __ Fmov(d16, 1.0);
+  __ Fmov(d17, 1.1);
+  __ Fmov(d18, 1.5);
+  __ Fmov(d19, 1.9);
+  __ Fmov(d20, 2.5);
+  __ Fmov(d21, -1.5);
+  __ Fmov(d22, -2.5);
+  // Use the FP64 infinity constants for double registers (the original used
+  // the FP32 constants; same value after implicit conversion, but these match
+  // the kFP64* assertions below).
+  __ Fmov(d23, kFP64PositiveInfinity);
+  __ Fmov(d24, kFP64NegativeInfinity);
+  __ Fmov(d25, 0.0);
+  __ Fmov(d26, -0.0);
+
+  __ Frintz(d11, d16);
+  __ Frintz(d12, d17);
+  __ Frintz(d13, d18);
+  __ Frintz(d14, d19);
+  __ Frintz(d15, d20);
+  __ Frintz(d16, d21);
+  __ Frintz(d17, d22);
+  __ Frintz(d18, d23);
+  __ Frintz(d19, d24);
+  __ Frintz(d20, d25);
+  __ Frintz(d21, d26);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(1.0, s2);
+  ASSERT_EQUAL_FP32(1.0, s3);
+  ASSERT_EQUAL_FP32(2.0, s4);
+  ASSERT_EQUAL_FP32(-1.0, s5);
+  ASSERT_EQUAL_FP32(-2.0, s6);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  ASSERT_EQUAL_FP32(0.0, s9);
+  ASSERT_EQUAL_FP32(-0.0, s10);
+  ASSERT_EQUAL_FP64(1.0, d11);
+  ASSERT_EQUAL_FP64(1.0, d12);
+  ASSERT_EQUAL_FP64(1.0, d13);
+  ASSERT_EQUAL_FP64(1.0, d14);
+  ASSERT_EQUAL_FP64(2.0, d15);
+  ASSERT_EQUAL_FP64(-1.0, d16);
+  ASSERT_EQUAL_FP64(-2.0, d17);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+  ASSERT_EQUAL_FP64(0.0, d20);
+  ASSERT_EQUAL_FP64(-0.0, d21);
+
+  TEARDOWN();
+}
+
+
+// Check Fcvt from single to double precision: the widening conversion is
+// exact for all finite values, infinities and zeroes, and NaN payloads are
+// preserved (top-aligned) with signalling NaNs quietened.
+TEST(fcvt_ds) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, 1.9);
+  __ Fmov(s20, 2.5);
+  __ Fmov(s21, -1.5);
+  __ Fmov(s22, -2.5);
+  __ Fmov(s23, kFP32PositiveInfinity);
+  __ Fmov(s24, kFP32NegativeInfinity);
+  __ Fmov(s25, 0.0);
+  __ Fmov(s26, -0.0);
+  __ Fmov(s27, FLT_MAX);
+  __ Fmov(s28, FLT_MIN);
+  __ Fmov(s29, rawbits_to_float(0x7fc12345));  // Quiet NaN.
+  __ Fmov(s30, rawbits_to_float(0x7f812345));  // Signalling NaN.
+
+  __ Fcvt(d0, s16);
+  __ Fcvt(d1, s17);
+  __ Fcvt(d2, s18);
+  __ Fcvt(d3, s19);
+  __ Fcvt(d4, s20);
+  __ Fcvt(d5, s21);
+  __ Fcvt(d6, s22);
+  __ Fcvt(d7, s23);
+  __ Fcvt(d8, s24);
+  __ Fcvt(d9, s25);
+  __ Fcvt(d10, s26);
+  __ Fcvt(d11, s27);
+  __ Fcvt(d12, s28);
+  __ Fcvt(d13, s29);
+  __ Fcvt(d14, s30);
+  END();
+
+  RUN();
+
+  // The expectations are float literals: each expected double is the exact
+  // widening of the single-precision input.
+  ASSERT_EQUAL_FP64(1.0f, d0);
+  ASSERT_EQUAL_FP64(1.1f, d1);
+  ASSERT_EQUAL_FP64(1.5f, d2);
+  ASSERT_EQUAL_FP64(1.9f, d3);
+  ASSERT_EQUAL_FP64(2.5f, d4);
+  ASSERT_EQUAL_FP64(-1.5f, d5);
+  ASSERT_EQUAL_FP64(-2.5f, d6);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+  ASSERT_EQUAL_FP64(0.0f, d9);
+  ASSERT_EQUAL_FP64(-0.0f, d10);
+  ASSERT_EQUAL_FP64(FLT_MAX, d11);
+  ASSERT_EQUAL_FP64(FLT_MIN, d12);
+
+  // Check that the NaN payload is preserved according to ARM64 conversion
+  // rules:
+  //  - The sign bit is preserved.
+  //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
+  //  - The remaining mantissa bits are copied until they run out.
+  //  - The low-order bits that haven't already been assigned are set to 0.
+  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
+  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
+
+  TEARDOWN();
+}
+
+
+// Test Fcvt (double -> single), a narrowing conversion that must round.
+TEST(fcvt_sd) {
+ INIT_V8();
+ // There are a huge number of corner-cases to check, so this test iterates
+ // through a list. The list is then negated and checked again (since the sign
+ // is irrelevant in ties-to-even rounding), so the list shouldn't include any
+ // negative values.
+ //
+ // Note that this test only checks ties-to-even rounding, because that is all
+ // that the simulator supports.
+ struct {double in; float expected;} test[] = {
+ // Check some simple conversions.
+ {0.0, 0.0f},
+ {1.0, 1.0f},
+ {1.5, 1.5f},
+ {2.0, 2.0f},
+ {FLT_MAX, FLT_MAX},
+ // - The smallest normalized float.
+ {pow(2.0, -126), powf(2, -126)},
+ // - Normal floats that need (ties-to-even) rounding.
+ // For normalized numbers:
+ // bit 29 (0x0000000020000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
+ {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
+ // - A mantissa that overflows into the exponent during rounding.
+ {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
+ // - The largest double that rounds to a normal float.
+ {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
+
+ // Doubles that are too big for a float.
+ {kFP64PositiveInfinity, kFP32PositiveInfinity},
+ {DBL_MAX, kFP32PositiveInfinity},
+ // - The smallest exponent that's too big for a float.
+ {pow(2.0, 128), kFP32PositiveInfinity},
+ // - This exponent is in range, but the value rounds to infinity.
+ {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
+
+ // Doubles that are too small for a float.
+ // - The smallest (subnormal) double.
+ {DBL_MIN, 0.0},
+ // - The largest double which is too small for a subnormal float.
+ {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
+
+ // Normal doubles that become subnormal floats.
+ // - The largest subnormal float.
+ {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
+ // - The smallest subnormal float.
+ {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
+ // - Subnormal floats that need (ties-to-even) rounding.
+ // For these subnormals:
+ // bit 34 (0x0000000400000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
+ {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
+ // - The smallest double which rounds up to become a subnormal float.
+ {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
+
+ // Check NaN payload preservation.
+ {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
+ // - Signalling NaNs become quiet NaNs.
+ {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
+ };
+ int count = sizeof(test) / sizeof(test[0]);
+
+ // Each table entry gets its own SETUP/RUN/TEARDOWN cycle, and is checked
+ // in both positive and negated form.
+ for (int i = 0; i < count; i++) {
+ double in = test[i].in;
+ float expected = test[i].expected;
+
+ // We only expect positive input.
+ ASSERT(std::signbit(in) == 0);
+ ASSERT(std::signbit(expected) == 0);
+
+ SETUP();
+ START();
+
+ __ Fmov(d10, in);
+ __ Fcvt(s20, d10);
+
+ __ Fmov(d11, -in);
+ __ Fcvt(s21, d11);
+
+ END();
+ RUN();
+ ASSERT_EQUAL_FP32(expected, s20);
+ ASSERT_EQUAL_FP32(-expected, s21);
+ TEARDOWN();
+ }
+}
+
+
+// Test Fcvtas (FP -> signed integer, round to nearest with ties away from
+// zero: the assertions show 2.5 -> 3 and -2.5 -> -3). Out-of-range values
+// and infinities saturate to INT32/INT64 MIN/MAX.
+TEST(fcvtas) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d15.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtas(w0, s0);
+ __ Fcvtas(w1, s1);
+ __ Fcvtas(w2, s2);
+ __ Fcvtas(w3, s3);
+ __ Fcvtas(w4, s4);
+ __ Fcvtas(w5, s5);
+ __ Fcvtas(w6, s6);
+ __ Fcvtas(w7, s7);
+ __ Fcvtas(w8, d8);
+ __ Fcvtas(w9, d9);
+ __ Fcvtas(w10, d10);
+ __ Fcvtas(w11, d11);
+ __ Fcvtas(w12, d12);
+ __ Fcvtas(w13, d13);
+ __ Fcvtas(w14, d14);
+ __ Fcvtas(w15, d15);
+ __ Fcvtas(x17, s17);
+ __ Fcvtas(x18, s18);
+ __ Fcvtas(x19, s19);
+ __ Fcvtas(x20, s20);
+ __ Fcvtas(x21, s21);
+ __ Fcvtas(x22, s22);
+ __ Fcvtas(x23, s23);
+ __ Fcvtas(x24, d24);
+ __ Fcvtas(x25, d25);
+ __ Fcvtas(x26, d26);
+ __ Fcvtas(x27, d27);
+ __ Fcvtas(x28, d28);
+ __ Fcvtas(x29, d29);
+ __ Fcvtas(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0xfffffffd, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0xfffffffd, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtau (FP -> unsigned integer, round to nearest with ties away from
+// zero). Negative inputs and -infinity saturate to 0; +infinity and values
+// too large for the destination saturate to UINT32/UINT64 MAX.
+TEST(fcvtau) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s6 and d8-d14.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ // X-register (64-bit) conversions use s16-s22 and d24-d29; s30 checks
+ // that a value just above UINT32_MAX saturates the W-register form.
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtau(w0, s0);
+ __ Fcvtau(w1, s1);
+ __ Fcvtau(w2, s2);
+ __ Fcvtau(w3, s3);
+ __ Fcvtau(w4, s4);
+ __ Fcvtau(w5, s5);
+ __ Fcvtau(w6, s6);
+ __ Fcvtau(w8, d8);
+ __ Fcvtau(w9, d9);
+ __ Fcvtau(w10, d10);
+ __ Fcvtau(w11, d11);
+ __ Fcvtau(w12, d12);
+ __ Fcvtau(w13, d13);
+ __ Fcvtau(w14, d14);
+ // NOTE(review): d15 is never initialized in this test and x15 is not
+ // checked below, so this conversion looks like a leftover — confirm.
+ __ Fcvtau(w15, d15);
+ __ Fcvtau(x16, s16);
+ __ Fcvtau(x17, s17);
+ __ Fcvtau(x18, s18);
+ __ Fcvtau(x19, s19);
+ __ Fcvtau(x20, s20);
+ __ Fcvtau(x21, s21);
+ __ Fcvtau(x22, s22);
+ __ Fcvtau(x24, d24);
+ __ Fcvtau(x25, d25);
+ __ Fcvtau(x26, d26);
+ __ Fcvtau(x27, d27);
+ __ Fcvtau(x28, d28);
+ __ Fcvtau(x29, d29);
+ __ Fcvtau(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtms (FP -> signed integer, round toward minus infinity / floor:
+// the assertions show 1.5 -> 1 and -1.5 -> -2). Out-of-range values and
+// infinities saturate to INT32/INT64 MIN/MAX.
+TEST(fcvtms) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d15.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtms(w0, s0);
+ __ Fcvtms(w1, s1);
+ __ Fcvtms(w2, s2);
+ __ Fcvtms(w3, s3);
+ __ Fcvtms(w4, s4);
+ __ Fcvtms(w5, s5);
+ __ Fcvtms(w6, s6);
+ __ Fcvtms(w7, s7);
+ __ Fcvtms(w8, d8);
+ __ Fcvtms(w9, d9);
+ __ Fcvtms(w10, d10);
+ __ Fcvtms(w11, d11);
+ __ Fcvtms(w12, d12);
+ __ Fcvtms(w13, d13);
+ __ Fcvtms(w14, d14);
+ __ Fcvtms(w15, d15);
+ __ Fcvtms(x17, s17);
+ __ Fcvtms(x18, s18);
+ __ Fcvtms(x19, s19);
+ __ Fcvtms(x20, s20);
+ __ Fcvtms(x21, s21);
+ __ Fcvtms(x22, s22);
+ __ Fcvtms(x23, s23);
+ __ Fcvtms(x24, d24);
+ __ Fcvtms(x25, d25);
+ __ Fcvtms(x26, d26);
+ __ Fcvtms(x27, d27);
+ __ Fcvtms(x28, d28);
+ __ Fcvtms(x29, d29);
+ __ Fcvtms(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtmu (FP -> unsigned integer, round toward minus infinity / floor).
+// Negative inputs and -infinity saturate to 0; +infinity saturates to
+// UINT32/UINT64 MAX.
+TEST(fcvtmu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d14.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ // NOTE(review): d15 is set up here but never converted below (there is no
+ // Fcvtmu(w15, d15) and no x15 assertion) — looks like a leftover; confirm.
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtmu(w0, s0);
+ __ Fcvtmu(w1, s1);
+ __ Fcvtmu(w2, s2);
+ __ Fcvtmu(w3, s3);
+ __ Fcvtmu(w4, s4);
+ __ Fcvtmu(w5, s5);
+ __ Fcvtmu(w6, s6);
+ __ Fcvtmu(w7, s7);
+ __ Fcvtmu(w8, d8);
+ __ Fcvtmu(w9, d9);
+ __ Fcvtmu(w10, d10);
+ __ Fcvtmu(w11, d11);
+ __ Fcvtmu(w12, d12);
+ __ Fcvtmu(w13, d13);
+ __ Fcvtmu(w14, d14);
+ __ Fcvtmu(x17, s17);
+ __ Fcvtmu(x18, s18);
+ __ Fcvtmu(x19, s19);
+ __ Fcvtmu(x20, s20);
+ __ Fcvtmu(x21, s21);
+ __ Fcvtmu(x22, s22);
+ __ Fcvtmu(x23, s23);
+ __ Fcvtmu(x24, d24);
+ __ Fcvtmu(x25, d25);
+ __ Fcvtmu(x26, d26);
+ __ Fcvtmu(x27, d27);
+ __ Fcvtmu(x28, d28);
+ __ Fcvtmu(x29, d29);
+ __ Fcvtmu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtns (FP -> signed integer, round to nearest with ties to even:
+// the assertions show 1.5 -> 2 and -1.5 -> -2). Out-of-range values and
+// infinities saturate to INT32/INT64 MIN/MAX.
+TEST(fcvtns) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d15.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtns(w0, s0);
+ __ Fcvtns(w1, s1);
+ __ Fcvtns(w2, s2);
+ __ Fcvtns(w3, s3);
+ __ Fcvtns(w4, s4);
+ __ Fcvtns(w5, s5);
+ __ Fcvtns(w6, s6);
+ __ Fcvtns(w7, s7);
+ __ Fcvtns(w8, d8);
+ __ Fcvtns(w9, d9);
+ __ Fcvtns(w10, d10);
+ __ Fcvtns(w11, d11);
+ __ Fcvtns(w12, d12);
+ __ Fcvtns(w13, d13);
+ __ Fcvtns(w14, d14);
+ __ Fcvtns(w15, d15);
+ __ Fcvtns(x17, s17);
+ __ Fcvtns(x18, s18);
+ __ Fcvtns(x19, s19);
+ __ Fcvtns(x20, s20);
+ __ Fcvtns(x21, s21);
+ __ Fcvtns(x22, s22);
+ __ Fcvtns(x23, s23);
+ __ Fcvtns(x24, d24);
+ __ Fcvtns(x25, d25);
+ __ Fcvtns(x26, d26);
+ __ Fcvtns(x27, d27);
+ // NOTE(review): the x28/d28 (-infinity to X register) case is disabled,
+ // along with its matching assertion below — presumably a known issue at
+ // the time this was written; confirm before re-enabling.
+// __ Fcvtns(x28, d28);
+ __ Fcvtns(x29, d29);
+ __ Fcvtns(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtnu (FP -> unsigned integer, round to nearest with ties to even:
+// the assertions show 1.5 -> 2). Negative inputs and -infinity saturate to
+// 0; +infinity and oversized values saturate to UINT32/UINT64 MAX.
+TEST(fcvtnu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s6 and d8-d14.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ // X-register (64-bit) conversions use s16-s22 and d24-d29; s30 checks
+ // that a value just above UINT32_MAX saturates the W-register form.
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtnu(w0, s0);
+ __ Fcvtnu(w1, s1);
+ __ Fcvtnu(w2, s2);
+ __ Fcvtnu(w3, s3);
+ __ Fcvtnu(w4, s4);
+ __ Fcvtnu(w5, s5);
+ __ Fcvtnu(w6, s6);
+ __ Fcvtnu(w8, d8);
+ __ Fcvtnu(w9, d9);
+ __ Fcvtnu(w10, d10);
+ __ Fcvtnu(w11, d11);
+ __ Fcvtnu(w12, d12);
+ __ Fcvtnu(w13, d13);
+ __ Fcvtnu(w14, d14);
+ // NOTE(review): d15 is never initialized in this test and x15 is not
+ // checked below, so this conversion looks like a leftover — confirm.
+ __ Fcvtnu(w15, d15);
+ __ Fcvtnu(x16, s16);
+ __ Fcvtnu(x17, s17);
+ __ Fcvtnu(x18, s18);
+ __ Fcvtnu(x19, s19);
+ __ Fcvtnu(x20, s20);
+ __ Fcvtnu(x21, s21);
+ __ Fcvtnu(x22, s22);
+ __ Fcvtnu(x24, d24);
+ __ Fcvtnu(x25, d25);
+ __ Fcvtnu(x26, d26);
+ __ Fcvtnu(x27, d27);
+ // NOTE(review): the x28/d28 case is disabled, along with its matching
+ // assertion below (same as in the fcvtns test) — confirm before
+ // re-enabling.
+// __ Fcvtnu(x28, d28);
+ __ Fcvtnu(x29, d29);
+ __ Fcvtnu(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtzs (FP -> signed integer, round toward zero / truncate: the
+// assertions show 1.5 -> 1 and -1.5 -> -1). Out-of-range values and
+// infinities saturate to INT32/INT64 MIN/MAX.
+TEST(fcvtzs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d15.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzs(w0, s0);
+ __ Fcvtzs(w1, s1);
+ __ Fcvtzs(w2, s2);
+ __ Fcvtzs(w3, s3);
+ __ Fcvtzs(w4, s4);
+ __ Fcvtzs(w5, s5);
+ __ Fcvtzs(w6, s6);
+ __ Fcvtzs(w7, s7);
+ __ Fcvtzs(w8, d8);
+ __ Fcvtzs(w9, d9);
+ __ Fcvtzs(w10, d10);
+ __ Fcvtzs(w11, d11);
+ __ Fcvtzs(w12, d12);
+ __ Fcvtzs(w13, d13);
+ __ Fcvtzs(w14, d14);
+ __ Fcvtzs(w15, d15);
+ __ Fcvtzs(x17, s17);
+ __ Fcvtzs(x18, s18);
+ __ Fcvtzs(x19, s19);
+ __ Fcvtzs(x20, s20);
+ __ Fcvtzs(x21, s21);
+ __ Fcvtzs(x22, s22);
+ __ Fcvtzs(x23, s23);
+ __ Fcvtzs(x24, d24);
+ __ Fcvtzs(x25, d25);
+ __ Fcvtzs(x26, d26);
+ __ Fcvtzs(x27, d27);
+ __ Fcvtzs(x28, d28);
+ __ Fcvtzs(x29, d29);
+ __ Fcvtzs(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test Fcvtzu (FP -> unsigned integer, round toward zero / truncate).
+// Negative inputs and -infinity saturate to 0; +infinity saturates to
+// UINT32/UINT64 MAX.
+TEST(fcvtzu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // W-register (32-bit) conversions use s0-s7 and d8-d14.
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ // NOTE(review): d15 is set up here but never converted below (there is no
+ // Fcvtzu(w15, d15) and no x15 assertion) — looks like a leftover; confirm.
+ __ Fmov(d15, kWMinInt + 1);
+ // X-register (64-bit) conversions use s17-s23 and d24-d30.
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzu(w0, s0);
+ __ Fcvtzu(w1, s1);
+ __ Fcvtzu(w2, s2);
+ __ Fcvtzu(w3, s3);
+ __ Fcvtzu(w4, s4);
+ __ Fcvtzu(w5, s5);
+ __ Fcvtzu(w6, s6);
+ __ Fcvtzu(w7, s7);
+ __ Fcvtzu(w8, d8);
+ __ Fcvtzu(w9, d9);
+ __ Fcvtzu(w10, d10);
+ __ Fcvtzu(w11, d11);
+ __ Fcvtzu(w12, d12);
+ __ Fcvtzu(w13, d13);
+ __ Fcvtzu(w14, d14);
+ __ Fcvtzu(x17, s17);
+ __ Fcvtzu(x18, s18);
+ __ Fcvtzu(x19, s19);
+ __ Fcvtzu(x20, s20);
+ __ Fcvtzu(x21, s21);
+ __ Fcvtzu(x22, s22);
+ __ Fcvtzu(x23, s23);
+ __ Fcvtzu(x24, d24);
+ __ Fcvtzu(x25, d25);
+ __ Fcvtzu(x26, d26);
+ __ Fcvtzu(x27, d27);
+ __ Fcvtzu(x28, d28);
+ __ Fcvtzu(x29, d29);
+ __ Fcvtzu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test that scvtf and ucvtf can convert the 64-bit input into the expected
+// value. All possible values of 'fbits' are tested. The expected value is
+// modified accordingly in each case.
+//
+// The expected value is specified as the bit encoding of the expected double
+// produced by scvtf (expected_scvtf_bits) as well as ucvtf
+// (expected_ucvtf_bits).
+//
+// Where the input value is representable by int32_t or uint32_t, conversions
+// from W registers will also be tested.
+static void TestUScvtfHelper(uint64_t in,
+ uint64_t expected_scvtf_bits,
+ uint64_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ // NOTE(review): masking with 0x7fffffff clears the sign bit, so for any
+ // negative s64 — even one representable as int32_t — s32 != s64 and the
+ // W-register scvtf results are never checked for negative inputs. Verify
+ // this is intended; a sign-extending truncation would widen the coverage.
+ int32_t s32 = s64 & 0x7fffffff;
+
+ // Only check the W-register conversions when the value fits in 32 bits.
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ // One slot per fbits value (0-64 for X conversions, 0-32 for W).
+ double results_scvtf_x[65];
+ double results_ucvtf_x[65];
+ double results_scvtf_w[33];
+ double results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ // x0-x3 hold the addresses of the result arrays.
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSizeInBits);
+
+ // Test integer conversions.
+ __ Scvtf(d0, x10);
+ __ Ucvtf(d1, x10);
+ __ Scvtf(d2, w11);
+ __ Ucvtf(d3, w11);
+ __ Str(d0, MemOperand(x0));
+ __ Str(d1, MemOperand(x1));
+ __ Str(d2, MemOperand(x2));
+ __ Str(d3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Scvtf(d2, w11, fbits);
+ __ Ucvtf(d3, w11, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSize));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSize));
+ __ Str(d2, MemOperand(x2, fbits * kDRegSize));
+ __ Str(d3, MemOperand(x3, fbits * kDRegSize));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSize));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSize));
+ }
+
+ END();
+ RUN();
+
+ // Check the results. Each fbits value scales the expected result down by
+ // 2^fbits relative to the integer (fbits == 0) conversion.
+ double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
+ double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
+
+ for (int fbits = 0; fbits <= 32; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
+ if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
+ }
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ }
+
+ TEARDOWN();
+}
+
+
// Exhaustive vector test for Scvtf/Ucvtf conversions to double.
// Each TestUScvtfHelper call takes an input bit pattern and the raw bits of
// the expected signed (scvtf) and unsigned (ucvtf) conversion results.
TEST(scvtf_ucvtf_double) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
  // Test mantissa extremities.
  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
  // The largest int32_t that fits in a double.
  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
  // Values that would be negative if treated as an int32_t.
  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
  // The largest int64_t that fits in a double.
  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
  // Check for bit pattern reproduction.
  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);

  // Conversions which require rounding (round-to-nearest, ties-to-even).
  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
}
+
+
+// The same as TestUScvtfHelper, but convert to floats.
+static void TestUScvtf32Helper(uint64_t in,
+ uint32_t expected_scvtf_bits,
+ uint32_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ int32_t s32 = s64 & 0x7fffffff;
+
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ float results_scvtf_x[65];
+ float results_ucvtf_x[65];
+ float results_scvtf_w[33];
+ float results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSizeInBits);
+
+ // Test integer conversions.
+ __ Scvtf(s0, x10);
+ __ Ucvtf(s1, x10);
+ __ Scvtf(s2, w11);
+ __ Ucvtf(s3, w11);
+ __ Str(s0, MemOperand(x0));
+ __ Str(s1, MemOperand(x1));
+ __ Str(s2, MemOperand(x2));
+ __ Str(s3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Scvtf(s2, w11, fbits);
+ __ Ucvtf(s3, w11, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSize));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSize));
+ __ Str(s2, MemOperand(x2, fbits * kSRegSize));
+ __ Str(s3, MemOperand(x3, fbits * kSRegSize));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSize));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSize));
+ }
+
+ END();
+ RUN();
+
+ // Check the results.
+ float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
+ float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
+
+ for (int fbits = 0; fbits <= 32; fbits++) {
+ float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+ float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+ ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+ if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
+ if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
+ break;
+ }
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ break;
+ float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+ float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+ ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+ }
+
+ TEARDOWN();
+}
+
+
// Exhaustive vector test for Scvtf/Ucvtf conversions to float.
// Each TestUScvtf32Helper call takes an input bit pattern and the raw bits of
// the expected signed (scvtf) and unsigned (ucvtf) conversion results.
TEST(scvtf_ucvtf_float) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
  // Test mantissa extremities.
  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
  // The largest int32_t that fits in a float.
  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
  // Values that would be negative if treated as an int32_t.
  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
  // The largest int64_t that fits in a float.
  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
  // Check for bit pattern reproduction.
  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);

  // Conversions which require rounding (round-to-nearest, ties-to-even).
  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
}
+
+
// Check that Mrs correctly reads the NZCV flags and the FPCR system register.
TEST(system_mrs) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  __ Mov(w2, 0x80000000);

  // Set the Z and C flags.
  __ Cmp(w0, w0);
  __ Mrs(x3, NZCV);

  // Set the N flag.
  __ Cmp(w0, w1);
  __ Mrs(x4, NZCV);

  // Set the Z, C and V flags.
  __ Adds(w0, w2, w2);  // 0x80000000 + 0x80000000: zero result, carry, overflow.
  __ Mrs(x5, NZCV);

  // Read the default FPCR.
  __ Mrs(x6, FPCR);
  END();

  RUN();

  // NZCV
  ASSERT_EQUAL_32(ZCFlag, w3);
  ASSERT_EQUAL_32(NFlag, w4);
  ASSERT_EQUAL_32(ZCVFlag, w5);

  // FPCR
  // The default FPCR on Linux-based platforms is 0.
  ASSERT_EQUAL_32(0, w6);

  TEARDOWN();
}
+
+
// Check that Msr correctly writes the NZCV flags and the FPCR, and that
// undefined FPCR bits ignore writes.
TEST(system_msr) {
  INIT_V8();
  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
  const uint64_t fpcr_core = 0x07c00000;

  // All FPCR fields (including fields which may be read-as-zero):
  //  Stride, Len
  //  IDE, IXE, UFE, OFE, DZE, IOE
  const uint64_t fpcr_all = fpcr_core | 0x00379f00;

  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x7fffffff);

  // x7 counts condition checks that pass; each Cinc below should fire.
  __ Mov(x7, 0);

  __ Mov(x10, NVFlag);
  __ Cmp(w0, w0);     // Set Z and C.
  __ Msr(NZCV, x10);  // Set N and V.
  // The Msr should have overwritten every flag set by the Cmp.
  __ Cinc(x7, x7, mi);  // N
  __ Cinc(x7, x7, ne);  // !Z
  __ Cinc(x7, x7, lo);  // !C
  __ Cinc(x7, x7, vs);  // V

  __ Mov(x10, ZCFlag);
  __ Cmn(w1, w1);     // Set N and V.
  __ Msr(NZCV, x10);  // Set Z and C.
  // The Msr should have overwritten every flag set by the Cmn.
  __ Cinc(x7, x7, pl);  // !N
  __ Cinc(x7, x7, eq);  // Z
  __ Cinc(x7, x7, hs);  // C
  __ Cinc(x7, x7, vc);  // !V

  // All core FPCR fields must be writable.
  __ Mov(x8, fpcr_core);
  __ Msr(FPCR, x8);
  __ Mrs(x8, FPCR);

  // All FPCR fields, including optional ones. This part of the test doesn't
  // achieve much other than ensuring that supported fields can be cleared by
  // the next test.
  __ Mov(x9, fpcr_all);
  __ Msr(FPCR, x9);
  __ Mrs(x9, FPCR);
  __ And(x9, x9, fpcr_core);  // Optional fields may read-as-zero; mask them.

  // The undefined bits must ignore writes.
  // It's conceivable that a future version of the architecture could use these
  // fields (making this test fail), but in the meantime this is a useful test
  // for the simulator.
  __ Mov(x10, ~fpcr_all);
  __ Msr(FPCR, x10);
  __ Mrs(x10, FPCR);

  END();

  RUN();

  // We should have incremented x7 (from 0) exactly 8 times.
  ASSERT_EQUAL_64(8, x7);

  ASSERT_EQUAL_64(fpcr_core, x8);
  ASSERT_EQUAL_64(fpcr_core, x9);
  ASSERT_EQUAL_64(0, x10);

  TEARDOWN();
}
+
+
// Check that Nop changes no registers and no flags.
TEST(system_nop) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  before.Dump(&masm);  // Snapshot the machine state immediately before the Nop.
  __ Nop();
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
+
+
// Check that arithmetic and logical instructions with xzr as the destination
// are effective NOPs: they must not write any register, and in particular must
// not write csp (the encodings have alternate forms that target the stack
// pointer).
TEST(zero_dest) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (unsigned i = 2; i < x30.code(); i++) {
    // Fill x2..x29 with distinct, recognizable values (multiples of the base).
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should be NOPs in these forms, but have
  // alternate forms which can write into the stack pointer.
  // (Lower-case mnemonics emit the raw instruction, bypassing macro handling.)
  __ add(xzr, x0, x1);
  __ add(xzr, x1, xzr);
  __ add(xzr, xzr, x1);

  __ and_(xzr, x0, x2);
  __ and_(xzr, x2, xzr);
  __ and_(xzr, xzr, x2);

  __ bic(xzr, x0, x3);
  __ bic(xzr, x3, xzr);
  __ bic(xzr, xzr, x3);

  __ eon(xzr, x0, x4);
  __ eon(xzr, x4, xzr);
  __ eon(xzr, xzr, x4);

  __ eor(xzr, x0, x5);
  __ eor(xzr, x5, xzr);
  __ eor(xzr, xzr, x5);

  __ orr(xzr, x0, x6);
  __ orr(xzr, x6, xzr);
  __ orr(xzr, xzr, x6);

  __ sub(xzr, x0, x7);
  __ sub(xzr, x7, xzr);
  __ sub(xzr, xzr, x7);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
+
+
// Like zero_dest, but for the flag-setting variants: with xzr as destination
// they may only update NZCV, and must not write any register (notably csp).
TEST(zero_dest_setflags) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (int i = 2; i < 30; i++) {
    // Fill x2..x29 with distinct, recognizable values (multiples of the base).
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should only write to the flags in these forms,
  // but have alternate forms which can write into the stack pointer.
  // (Lower-case mnemonics emit the raw instruction, bypassing macro handling.)
  __ adds(xzr, x0, Operand(x1, UXTX));
  __ adds(xzr, x1, Operand(xzr, UXTX));
  __ adds(xzr, x1, 1234);
  __ adds(xzr, x0, x1);
  __ adds(xzr, x1, xzr);
  __ adds(xzr, xzr, x1);

  __ ands(xzr, x2, ~0xf);
  __ ands(xzr, xzr, ~0xf);
  __ ands(xzr, x0, x2);
  __ ands(xzr, x2, xzr);
  __ ands(xzr, xzr, x2);

  __ bics(xzr, x3, ~0xf);
  __ bics(xzr, xzr, ~0xf);
  __ bics(xzr, x0, x3);
  __ bics(xzr, x3, xzr);
  __ bics(xzr, xzr, x3);

  __ subs(xzr, x0, Operand(x3, UXTX));
  __ subs(xzr, x3, Operand(xzr, UXTX));
  __ subs(xzr, x3, 1234);
  __ subs(xzr, x0, x3);
  __ subs(xzr, x3, xzr);
  __ subs(xzr, xzr, x3);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  // NZCV is intentionally not checked: these instructions legitimately set it.
  ASSERT_EQUAL_REGISTERS(before);

  TEARDOWN();
}
+
+
// Check the Bit() encodings used to build RegList bit masks: each register's
// bit must match its code, and W/X aliases of the same register must share a
// bit.
TEST(register_bit) {
  // No code generation takes place in this test, so no need to setup and
  // teardown.

  // Simple tests.
  CHECK(x0.Bit() == (1UL << 0));
  CHECK(x1.Bit() == (1UL << 1));
  CHECK(x10.Bit() == (1UL << 10));

  // AAPCS64 definitions.
  CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
  CHECK(lr.Bit() == (1UL << kLinkRegCode));

  // Fixed (hardware) definitions.
  CHECK(xzr.Bit() == (1UL << kZeroRegCode));

  // Internal ABI definitions.
  CHECK(jssp.Bit() == (1UL << kJSSPCode));
  CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
  CHECK(csp.Bit() != xzr.Bit());  // csp and xzr share an encoding, not a bit.

  // xn.Bit() == wn.Bit() at all times, for the same n.
  CHECK(x0.Bit() == w0.Bit());
  CHECK(x1.Bit() == w1.Bit());
  CHECK(x10.Bit() == w10.Bit());
  CHECK(jssp.Bit() == wjssp.Bit());
  CHECK(xzr.Bit() == wzr.Bit());
  CHECK(csp.Bit() == wcsp.Bit());
}
+
+
// Check that SetStackPointer/StackPointer correctly report the currently
// selected stack pointer register.
TEST(stack_pointer_override) {
  // This test generates some stack maintenance code, but the test only checks
  // the reported state.
  INIT_V8();
  SETUP();
  START();

  // The default stack pointer in V8 is jssp, but for compatibility with W16,
  // the test framework sets it to csp before calling the test.
  CHECK(csp.Is(__ StackPointer()));
  __ SetStackPointer(x0);
  CHECK(x0.Is(__ StackPointer()));
  __ SetStackPointer(jssp);
  CHECK(jssp.Is(__ StackPointer()));
  __ SetStackPointer(csp);  // Restore csp so the framework can unwind cleanly.
  CHECK(csp.Is(__ StackPointer()));

  END();
  RUN();
  TEARDOWN();
}
+
+
// Check basic Poke (store to stack) and Peek (load from stack) round-trips for
// both X and W registers, at aligned offsets.
TEST(peek_poke_simple) {
  INIT_V8();
  SETUP();
  START();

  static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
  static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
                                    x12.Bit() | x13.Bit();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);  // Reserve stack space for the pokes below.

  // Simple exchange.
  // After this test:
  //  x0-x3 should be unchanged.
  //  w10-w13 should contain the lower words of x0-x3.
  __ Poke(x0, 0);
  __ Poke(x1, 8);
  __ Poke(x2, 16);
  __ Poke(x3, 24);
  Clobber(&masm, x0_to_x3);  // Ensure the Peeks really restore the values.
  __ Peek(x0, 0);
  __ Peek(x1, 8);
  __ Peek(x2, 16);
  __ Peek(x3, 24);

  __ Poke(w0, 0);
  __ Poke(w1, 4);
  __ Poke(w2, 8);
  __ Poke(w3, 12);
  Clobber(&masm, x10_to_x13);
  __ Peek(w10, 0);
  __ Peek(w11, 4);
  __ Peek(w12, 8);
  __ Peek(w13, 12);

  __ Drop(4);  // Release the claimed stack space.

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);

  TEARDOWN();
}
+
+
// Check Poke/Peek round-trips at unaligned stack offsets (1-7 bytes) for X
// registers and (1-3 bytes) for W registers.
TEST(peek_poke_unaligned) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);
  __ Add(x4, x3, x0);
  __ Add(x5, x4, x0);
  __ Add(x6, x5, x0);

  __ Claim(4);  // Reserve stack space for the pokes below.

  // Unaligned exchanges.
  // After this test:
  //  x0-x6 should be unchanged.
  //  w10-w12 should contain the lower words of x0-x2.
  // Each X register is poked and peeked back at a distinct unaligned offset.
  __ Poke(x0, 1);
  Clobber(&masm, x0.Bit());
  __ Peek(x0, 1);
  __ Poke(x1, 2);
  Clobber(&masm, x1.Bit());
  __ Peek(x1, 2);
  __ Poke(x2, 3);
  Clobber(&masm, x2.Bit());
  __ Peek(x2, 3);
  __ Poke(x3, 4);
  Clobber(&masm, x3.Bit());
  __ Peek(x3, 4);
  __ Poke(x4, 5);
  Clobber(&masm, x4.Bit());
  __ Peek(x4, 5);
  __ Poke(x5, 6);
  Clobber(&masm, x5.Bit());
  __ Peek(x5, 6);
  __ Poke(x6, 7);
  Clobber(&masm, x6.Bit());
  __ Peek(x6, 7);

  // W-register variants: peek into different registers to check zero-extension.
  __ Poke(w0, 1);
  Clobber(&masm, w10.Bit());
  __ Peek(w10, 1);
  __ Poke(w1, 2);
  Clobber(&masm, w11.Bit());
  __ Peek(w11, 2);
  __ Poke(w2, 3);
  Clobber(&masm, w12.Bit());
  __ Peek(w12, 3);

  __ Drop(4);  // Release the claimed stack space.

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);
  ASSERT_EQUAL_64(literal_base * 5, x4);
  ASSERT_EQUAL_64(literal_base * 6, x5);
  ASSERT_EQUAL_64(literal_base * 7, x6);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);

  TEARDOWN();
}
+
+
// Check that Poke/Peek obey little-endian byte ordering: reading at an offset
// straddling two identical stored copies must yield the rotated word pattern.
TEST(peek_poke_endianness) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);

  __ Claim(4);  // Reserve stack space for the pokes below.

  // Endianness tests.
  // After this section:
  //  x4 should match x0[31:0]:x0[63:32]
  //  w5 should match w1[15:0]:w1[31:16]
  __ Poke(x0, 0);
  __ Poke(x0, 8);
  __ Peek(x4, 4);  // Read straddling the two copies of x0.

  __ Poke(w1, 0);
  __ Poke(w1, 4);
  __ Peek(w5, 2);  // Read straddling the two copies of w1.

  __ Drop(4);  // Release the claimed stack space.

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x1_expected >> 16) & 0x0000ffff);

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x4_expected, x4);
  ASSERT_EQUAL_64(x5_expected, x5);

  TEARDOWN();
}
+
+
// Check Poke/Peek interleaved with other stack operations (Claim, Drop, Push,
// Pop) and with a temporarily overridden stack pointer.
TEST(peek_poke_mixed) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);

  // Mix with other stack operations.
  //  After this section:
  //    x0-x3 should be unchanged.
  //    x6 should match x1[31:0]:x0[63:32]
  //    w7 should match x1[15:0]:x0[63:48]
  __ Poke(x1, 8);
  __ Poke(x0, 0);
  {
    // Temporarily switch the stack pointer to x4 so unaligned claims/drops
    // can be used (csp must stay 16-byte aligned).
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(x4, __ StackPointer());
    __ SetStackPointer(x4);

    __ Poke(wzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1, kWRegSize);
    __ Peek(x6, 0);
    __ Claim(1);
    __ Peek(w7, 10);
    __ Poke(x3, 28);
    __ Poke(xzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1);
    __ Poke(x2, 12);
    __ Push(w0);

    // Restore csp as the stack pointer before leaving the scope.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  __ Pop(x0, x1, x2, x3);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x2_expected = literal_base * 3;
  uint64_t x3_expected = literal_base * 4;
  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x0_expected >> 48) & 0x0000ffff);

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x2_expected, x2);
  ASSERT_EQUAL_64(x3_expected, x3);
  ASSERT_EQUAL_64(x6_expected, x6);
  ASSERT_EQUAL_64(x7_expected, x7);

  TEARDOWN();
}
+
+
// This enum is used only as an argument to the push-pop test helpers.
enum PushPopMethod {
  // Push or Pop using the Push and Pop methods, with blocks of up to four
  // registers. (Smaller blocks will be used if necessary.)
  PushPopByFour,

  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
  PushPopRegList
};


// The maximum number of registers that can be used by the PushPopJssp* tests,
// where a reg_count field is provided.
// Sentinel: passing this as reg_count makes the helper use every allowed
// register (see PushPopJsspSimpleHelper).
static int const kPushPopJsspMaxRegCount = -1;
+
// Test a simple push-pop pattern:
//  * Claim <claim> bytes to set the stack alignment.
//  * Push <reg_count> registers with size <reg_size>.
//  * Clobber the register contents.
//  * Pop <reg_count> registers to restore the original contents.
//  * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
//
// reg_count may be kPushPopJsspMaxRegCount, in which case all allowed
// registers are used. reg_size is in bits (kWRegSizeInBits or
// kXRegSizeInBits).
static void PushPopJsspSimpleHelper(int reg_count,
                                    int claim,
                                    int reg_size,
                                    PushPopMethod push_method,
                                    PushPopMethod pop_method) {
  SETUP();

  START();

  // Registers x8 and x9 are used by the macro assembler for debug code (for
  // example in 'Pop'), so we can't use them here. We can't use jssp because it
  // will be the stack pointer for this test.
  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  // Work out which registers to use, based on reg_size.
  // r[] holds the registers at the requested size; x[] holds the X aliases.
  Register r[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
                                       allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    // Use jssp as the stack pointer for the duration of the push-pop sequence.
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers.
    for (i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3:  __ Push(r[2], r[1], r[0]); break;
          case 2:  __ Push(r[1], r[0]);       break;
          case 1:  __ Push(r[0]);             break;
          default: ASSERT(i == 0);            break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    Clobber(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
          case 2:  __ Pop(r[i], r[i+1]);         break;
          case 1:  __ Pop(r[i]);                 break;
          default: ASSERT(i == reg_count);       break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    // Restore csp as the stack pointer.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    if (x[i].IsZero()) {
      ASSERT_EQUAL_64(0, x[i]);
    } else {
      ASSERT_EQUAL_64(literal_base * i, x[i]);
    }
  }

  TEARDOWN();
}
+
+
// Exercise PushPopJsspSimpleHelper for W registers, over all combinations of
// claim size (0-8 bytes), register count (0-8 and the maximum), and push/pop
// method.
TEST(push_pop_jssp_simple_32) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopByFour, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopByFour, PushPopRegList);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopRegList, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopByFour, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopByFour, PushPopRegList);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopRegList, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopRegList, PushPopRegList);
  }
}
+
+
// Exercise PushPopJsspSimpleHelper for X registers, over all combinations of
// claim size (0-8 bytes), register count (0-8 and the maximum), and push/pop
// method.
TEST(push_pop_jssp_simple_64) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopByFour, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopByFour, PushPopRegList);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopRegList, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopByFour, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopByFour, PushPopRegList);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopRegList, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopRegList, PushPopRegList);
  }
}
+
+
// The maximum number of registers that can be used by the PushPopFPJssp* tests,
// where a reg_count field is provided.
// Sentinel: the helper substitutes the full count of allowed FP registers when
// this is passed as reg_count.
static int const kPushPopFPJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+// * Claim <claim> bytes to set the stack alignment.
+// * Push <reg_count> FP registers with size <reg_size>.
+// * Clobber the register contents.
+// * Pop <reg_count> FP registers to restore the original contents.
+// * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+static void PushPopFPJsspSimpleHelper(int reg_count,
+                                      int claim,
+                                      int reg_size,
+                                      PushPopMethod push_method,
+                                      PushPopMethod pop_method) {
+  SETUP();
+
+  START();
+
+  // We can use any floating-point register. None of them are reserved for
+  // debug code, for example.
+  static RegList const allowed = ~0;
+  // reg_count == kPushPopFPJsspMaxRegCount (-1) means "use them all".
+  if (reg_count == kPushPopFPJsspMaxRegCount) {
+    reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
+  }
+  // Work out which registers to use, based on reg_size.
+  FPRegister v[kNumberOfRegisters];
+  FPRegister d[kNumberOfRegisters];
+  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
+                                         allowed);
+
+  // The literal base is chosen to have three useful properties:
+  // * When multiplied (using an integer) by small values (such as a register
+  //   index), this value is clearly readable in the result.
+  // * The value is not formed from repeating fixed-size smaller values, so it
+  //   can be used to detect endianness-related errors.
+  // * It is never a floating-point NaN, and will therefore always compare
+  //   equal to itself.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  {
+    // Switch the stack pointer to jssp for the body of the test, then restore
+    // csp before END() so the test exits with the original stack pointer.
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    int i;
+
+    // Initialize the registers, using X registers to load the literal.
+    __ Mov(x0, 0);
+    __ Mov(x1, literal_base);
+    for (i = 0; i < reg_count; i++) {
+      // Always write into the D register, to ensure that the upper word is
+      // properly ignored by Push when testing S registers.
+      __ Fmov(d[i], x0);
+      // Calculate the next literal.
+      __ Add(x0, x0, x1);
+    }
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    switch (push_method) {
+      case PushPopByFour:
+        // Push high-numbered registers first (to the highest addresses).
+        for (i = reg_count; i >= 4; i -= 4) {
+          __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
+        }
+        // Finish off the leftovers.
+        switch (i) {
+          case 3:  __ Push(v[2], v[1], v[0]); break;
+          case 2:  __ Push(v[1], v[0]);       break;
+          case 1:  __ Push(v[0]);             break;
+          default: ASSERT(i == 0);            break;
+        }
+        break;
+      case PushPopRegList:
+        __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
+        break;
+    }
+
+    // Clobber all the registers, to ensure that they get repopulated by Pop.
+    ClobberFP(&masm, list);
+
+    switch (pop_method) {
+      case PushPopByFour:
+        // Pop low-numbered registers first (from the lowest addresses).
+        for (i = 0; i <= (reg_count-4); i += 4) {
+          __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
+        }
+        // Finish off the leftovers.
+        switch (reg_count - i) {
+          case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
+          case 2:  __ Pop(v[i], v[i+1]);         break;
+          case 1:  __ Pop(v[i]);                 break;
+          default: ASSERT(i == reg_count);       break;
+        }
+        break;
+      case PushPopRegList:
+        __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
+        break;
+    }
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Check that the register contents were preserved.
+  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
+  // test that the upper word was properly cleared by Pop.
+  // Mask the literal to the tested register width before comparing.
+  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+  for (int i = 0; i < reg_count; i++) {
+    uint64_t literal = literal_base * i;
+    double expected;
+    memcpy(&expected, &literal, sizeof(expected));
+    ASSERT_EQUAL_FP64(expected, d[i]);
+  }
+
+  TEARDOWN();
+}
+
+
+// S-sized FP push/pop: all push/pop method combinations, claim sizes 0-8
+// bytes, register counts 0-8 plus the maximum.
+TEST(push_pop_fp_jssp_simple_32) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+                                PushPopByFour, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+                                PushPopByFour, PushPopRegList);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+                                PushPopRegList, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+                                PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+                              PushPopByFour, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+                              PushPopByFour, PushPopRegList);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+                              PushPopRegList, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+                              PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// D-sized FP push/pop: all push/pop method combinations, claim sizes 0-8
+// bytes, register counts 0-8 plus the maximum.
+TEST(push_pop_fp_jssp_simple_64) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+                                PushPopByFour, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+                                PushPopByFour, PushPopRegList);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+                                PushPopRegList, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+                                PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+                              PushPopByFour, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+                              PushPopByFour, PushPopRegList);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+                              PushPopRegList, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+                              PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// Push and pop data using an overlapping combination of Push/Pop and
+// RegList-based methods.
+// The final ASSERT_EQUAL_64 values below follow directly from the push/pop
+// interleaving in the code block: the RegList push/pop round-trips r0-r3,
+// then Push(r[2], r[1], r[3], r[0]) is unwound by Pop(r[4], r[5]) and
+// Pop(r[6], r[7], r[8], r[9]).
+static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
+  SETUP();
+
+  // Registers x8 and x9 are used by the macro assembler for debug code (for
+  // example in 'Pop'), so we can't use them here. We can't use jssp because it
+  // will be the stack pointer for this test.
+  static RegList const allowed =
+      ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
+  // Work out which registers to use, based on reg_size.
+  Register r[10];
+  Register x[10];
+  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
+
+  // Calculate some handy register lists.
+  RegList r0_to_r3 = 0;
+  for (int i = 0; i <= 3; i++) {
+    r0_to_r3 |= x[i].Bit();
+  }
+  RegList r4_to_r5 = 0;
+  for (int i = 4; i <= 5; i++) {
+    r4_to_r5 |= x[i].Bit();
+  }
+  RegList r6_to_r9 = 0;
+  for (int i = 6; i <= 9; i++) {
+    r6_to_r9 |= x[i].Bit();
+  }
+
+  // The literal base is chosen to have two useful properties:
+  // * When multiplied by small values (such as a register index), this value
+  //   is clearly readable in the result.
+  // * The value is not formed from repeating fixed-size smaller values, so it
+  //   can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  START();
+  {
+    // Run the test body with jssp as the stack pointer; csp is restored
+    // before END().
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    __ Mov(x[3], literal_base * 3);
+    __ Mov(x[2], literal_base * 2);
+    __ Mov(x[1], literal_base * 1);
+    __ Mov(x[0], literal_base * 0);
+
+    __ PushSizeRegList(r0_to_r3, reg_size);
+    __ Push(r[3], r[2]);
+
+    Clobber(&masm, r0_to_r3);
+    __ PopSizeRegList(r0_to_r3, reg_size);
+
+    __ Push(r[2], r[1], r[3], r[0]);
+
+    Clobber(&masm, r4_to_r5);
+    __ Pop(r[4], r[5]);
+    Clobber(&masm, r6_to_r9);
+    __ Pop(r[6], r[7], r[8], r[9]);
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+  // that the upper word was properly cleared by Pop.
+  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+
+  ASSERT_EQUAL_64(literal_base * 3, x[9]);
+  ASSERT_EQUAL_64(literal_base * 2, x[8]);
+  ASSERT_EQUAL_64(literal_base * 0, x[7]);
+  ASSERT_EQUAL_64(literal_base * 3, x[6]);
+  ASSERT_EQUAL_64(literal_base * 1, x[5]);
+  ASSERT_EQUAL_64(literal_base * 2, x[4]);
+
+  TEARDOWN();
+}
+
+
+// Mixed Push/Pop and RegList methods on X-sized registers, claim 0-8 bytes.
+TEST(push_pop_jssp_mixed_methods_64) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
+  }
+}
+
+
+// Mixed Push/Pop and RegList methods on W-sized registers, claim 0-8 bytes.
+TEST(push_pop_jssp_mixed_methods_32) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
+  }
+}
+
+
+// Push and pop data using overlapping X- and W-sized quantities.
+// A shadow model of the stack is kept in `stack[]` (in W-sized slots) while
+// the generated code pushes registers, so the final pops can be checked
+// against the expected slot contents.
+static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
+  // This test emits rather a lot of code.
+  SETUP_SIZE(BUF_SIZE * 2);
+
+  // Work out which registers to use, based on reg_size.
+  Register tmp = x8;
+  static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
+  if (reg_count == kPushPopJsspMaxRegCount) {
+    reg_count = CountSetBits(allowed, kNumberOfRegisters);
+  }
+  Register w[kNumberOfRegisters];
+  Register x[kNumberOfRegisters];
+  RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
+
+  // The number of W-sized slots we expect to pop. When we pop, we alternate
+  // between W and X registers, so we need reg_count*1.5 W-sized slots.
+  int const requested_w_slots = reg_count + reg_count / 2;
+
+  // Track what _should_ be on the stack, using W-sized slots.
+  static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
+  uint32_t stack[kMaxWSlots];
+  for (int i = 0; i < kMaxWSlots; i++) {
+    stack[i] = 0xdeadbeef;
+  }
+
+  // The literal base is chosen to have two useful properties:
+  // * When multiplied by small values (such as a register index), this value
+  //   is clearly readable in the result.
+  // * The value is not formed from repeating fixed-size smaller values, so it
+  //   can be used to detect endianness-related errors.
+  static uint64_t const literal_base = 0x0100001000100101UL;
+  static uint64_t const literal_base_hi = literal_base >> 32;
+  static uint64_t const literal_base_lo = literal_base & 0xffffffff;
+  static uint64_t const literal_base_w = literal_base & 0xffffffff;
+
+  START();
+  {
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    // Initialize the registers.
+    for (int i = 0; i < reg_count; i++) {
+      // Always write into the X register, to ensure that the upper word is
+      // properly ignored by Push when testing W registers.
+      if (!x[i].IsZero()) {
+        __ Mov(x[i], literal_base * i);
+      }
+    }
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    // The push-pop pattern is as follows:
+    // Push:             Pop:
+    //  x[0](hi)   ->     w[0]
+    //  x[0](lo)   ->     x[1](hi)
+    //  w[1]       ->     x[1](lo)
+    //  w[1]       ->     w[2]
+    //  x[2](hi)   ->     x[2](hi)
+    //  x[2](lo)   ->     x[2](lo)
+    //  x[2](hi)   ->     w[3]
+    //  x[2](lo)   ->     x[4](hi)
+    //  x[2](hi)   ->     x[4](lo)
+    //  x[2](lo)   ->     w[5]
+    //  w[3]       ->     x[5](hi)
+    //  w[3]       ->     x[6](lo)
+    //  w[3]       ->     w[7]
+    //  w[3]       ->     x[8](hi)
+    //  x[4](hi)   ->     x[8](lo)
+    //  x[4](lo)   ->     w[9]
+    // ... pattern continues ...
+    //
+    // That is, registers are pushed starting with the lower numbers,
+    // alternating between x and w registers, and pushing i%4+1 copies of each,
+    // where i is the register number.
+    // Registers are popped starting with the higher numbers one-by-one,
+    // alternating between x and w registers, but only popping one at a time.
+    //
+    // This pattern provides a wide variety of alignment effects and overlaps.
+
+    // ---- Push ----
+
+    int active_w_slots = 0;
+    for (int i = 0; active_w_slots < requested_w_slots; i++) {
+      ASSERT(i < reg_count);
+      // In order to test various arguments to PushMultipleTimes, and to try to
+      // exercise different alignment and overlap effects, we push each
+      // register a different number of times.
+      int times = i % 4 + 1;
+      if (i & 1) {
+        // Push odd-numbered registers as W registers.
+        if (i & 2) {
+          __ PushMultipleTimes(w[i], times);
+        } else {
+          // Use a register to specify the count.
+          __ Mov(tmp.W(), times);
+          __ PushMultipleTimes(w[i], tmp.W());
+        }
+        // Fill in the expected stack slots.
+        for (int j = 0; j < times; j++) {
+          if (w[i].Is(wzr)) {
+            // The zero register always writes zeroes.
+            stack[active_w_slots++] = 0;
+          } else {
+            stack[active_w_slots++] = literal_base_w * i;
+          }
+        }
+      } else {
+        // Push even-numbered registers as X registers.
+        if (i & 2) {
+          __ PushMultipleTimes(x[i], times);
+        } else {
+          // Use a register to specify the count.
+          __ Mov(tmp, times);
+          __ PushMultipleTimes(x[i], tmp);
+        }
+        // Fill in the expected stack slots.
+        for (int j = 0; j < times; j++) {
+          if (x[i].IsZero()) {
+            // The zero register always writes zeroes.
+            stack[active_w_slots++] = 0;
+            stack[active_w_slots++] = 0;
+          } else {
+            stack[active_w_slots++] = literal_base_hi * i;
+            stack[active_w_slots++] = literal_base_lo * i;
+          }
+        }
+      }
+    }
+    // Because we were pushing several registers at a time, we probably pushed
+    // more than we needed to.
+    if (active_w_slots > requested_w_slots) {
+      __ Drop(active_w_slots - requested_w_slots, kWRegSize);
+      // Bump the number of active W-sized slots back to where it should be,
+      // and fill the empty space with a dummy value.
+      // NOTE(review): the post-decrement resets indices
+      // (requested_w_slots, active_w_slots]; slot[requested_w_slots] keeps its
+      // pushed value, which appears harmless since the checks below only read
+      // slots 0..requested_w_slots-1 — confirm.
+      do {
+        stack[active_w_slots--] = 0xdeadbeef;
+      } while (active_w_slots > requested_w_slots);
+    }
+
+    // ---- Pop ----
+
+    Clobber(&masm, list);
+
+    // If popping an even number of registers, the first one will be X-sized.
+    // Otherwise, the first one will be W-sized.
+    bool next_is_64 = !(reg_count & 1);
+    for (int i = reg_count-1; i >= 0; i--) {
+      if (next_is_64) {
+        __ Pop(x[i]);
+        active_w_slots -= 2;
+      } else {
+        __ Pop(w[i]);
+        active_w_slots -= 1;
+      }
+      next_is_64 = !next_is_64;
+    }
+    ASSERT(active_w_slots == 0);
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  int slot = 0;
+  for (int i = 0; i < reg_count; i++) {
+    // Even-numbered registers were written as W registers.
+    // Odd-numbered registers were written as X registers.
+    bool expect_64 = (i & 1);
+    uint64_t expected;
+
+    if (expect_64) {
+      uint64_t hi = stack[slot++];
+      uint64_t lo = stack[slot++];
+      expected = (hi << 32) | lo;
+    } else {
+      expected = stack[slot++];
+    }
+
+    // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
+    // test that the upper word was properly cleared by Pop.
+    if (x[i].IsZero()) {
+      ASSERT_EQUAL_64(0, x[i]);
+    } else {
+      ASSERT_EQUAL_64(expected, x[i]);
+    }
+  }
+  ASSERT(slot == requested_w_slots);
+
+  TEARDOWN();
+}
+
+
+// Drive PushPopJsspWXOverlapHelper over claim sizes 0-8 and register counts
+// 1-8, plus the maximum register count. Each configuration is run four times
+// with identical arguments.
+TEST(push_pop_jssp_wx_overlap) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 1; count <= 8; count++) {
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+    }
+    // Test with the maximum number of registers.
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+  }
+}
+
+
+// Exercise Push/Pop and RegList push/pop with csp (not jssp) as the stack
+// pointer. The ASSERT_EQUAL_* values below encode the permutations applied by
+// each push/pop sequence to the four seed values in x0-x3.
+TEST(push_pop_csp) {
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  ASSERT(csp.Is(__ StackPointer()));
+
+  __ Mov(x3, 0x3333333333333333UL);
+  __ Mov(x2, 0x2222222222222222UL);
+  __ Mov(x1, 0x1111111111111111UL);
+  __ Mov(x0, 0x0000000000000000UL);
+  __ Claim(2);
+  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+  __ Push(x3, x2);
+  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+  __ Push(x2, x1, x3, x0);
+  __ Pop(x4, x5);
+  __ Pop(x6, x7, x8, x9);
+
+  __ Claim(2);
+  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
+  __ Push(w3, w1, w2, w0);
+  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
+  __ Pop(w14, w15, w16, w17);
+
+  __ Claim(2);
+  __ Push(w2, w2, w1, w1);
+  __ Push(x3, x3);
+  __ Pop(w18, w19, w20, w21);
+  __ Pop(x22, x23);
+
+  __ Claim(2);
+  __ PushXRegList(x1.Bit() | x22.Bit());
+  __ PopXRegList(x24.Bit() | x26.Bit());
+
+  __ Claim(2);
+  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
+  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
+
+  __ Claim(2);
+  // Empty and full RegLists must also be accepted.
+  __ PushXRegList(0);
+  __ PopXRegList(0);
+  __ PushXRegList(0xffffffff);
+  __ PopXRegList(0xffffffff);
+  __ Drop(12);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
+  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x4);
+
+  ASSERT_EQUAL_32(0x11111111U, w13);
+  ASSERT_EQUAL_32(0x33333333U, w12);
+  ASSERT_EQUAL_32(0x00000000U, w11);
+  ASSERT_EQUAL_32(0x22222222U, w10);
+  ASSERT_EQUAL_32(0x11111111U, w17);
+  ASSERT_EQUAL_32(0x00000000U, w16);
+  ASSERT_EQUAL_32(0x33333333U, w15);
+  ASSERT_EQUAL_32(0x22222222U, w14);
+
+  ASSERT_EQUAL_32(0x11111111U, w18);
+  ASSERT_EQUAL_32(0x11111111U, w19);
+  ASSERT_EQUAL_32(0x11111111U, w20);
+  ASSERT_EQUAL_32(0x11111111U, w21);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x23);
+
+  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x26);
+
+  ASSERT_EQUAL_32(0x33333333U, w25);
+  ASSERT_EQUAL_32(0x00000000U, w27);
+  ASSERT_EQUAL_32(0x22222222U, w28);
+  ASSERT_EQUAL_32(0x33333333U, w29);
+  TEARDOWN();
+}
+
+
+// Queue a mixed set of X, W, D and S registers in a PushPopQueue, push them
+// all at once with PushQueued(), then pop them conventionally and check that
+// every value round-trips.
+TEST(push_queued) {
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  ASSERT(__ StackPointer().Is(csp));
+  __ Mov(jssp, __ StackPointer());
+  __ SetStackPointer(jssp);
+
+  MacroAssembler::PushPopQueue queue(&masm);
+
+  // Queue up registers.
+  queue.Queue(x0);
+  queue.Queue(x1);
+  queue.Queue(x2);
+  queue.Queue(x3);
+
+  queue.Queue(w4);
+  queue.Queue(w5);
+  queue.Queue(w6);
+
+  queue.Queue(d0);
+  queue.Queue(d1);
+
+  queue.Queue(s2);
+
+  __ Mov(x0, 0x1234000000000000);
+  __ Mov(x1, 0x1234000100010001);
+  __ Mov(x2, 0x1234000200020002);
+  __ Mov(x3, 0x1234000300030003);
+  __ Mov(w4, 0x12340004);
+  __ Mov(w5, 0x12340005);
+  __ Mov(w6, 0x12340006);
+  __ Fmov(d0, 123400.0);
+  __ Fmov(d1, 123401.0);
+  __ Fmov(s2, 123402.0);
+
+  // Actually push them.
+  queue.PushQueued();
+
+  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
+  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
+
+  // Pop them conventionally.
+  __ Pop(s2);
+  __ Pop(d1, d0);
+  __ Pop(w6, w5, w4);
+  __ Pop(x3, x2, x1, x0);
+
+  __ Mov(csp, __ StackPointer());
+  __ SetStackPointer(csp);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1234000000000000, x0);
+  ASSERT_EQUAL_64(0x1234000100010001, x1);
+  ASSERT_EQUAL_64(0x1234000200020002, x2);
+  ASSERT_EQUAL_64(0x1234000300030003, x3);
+
+  ASSERT_EQUAL_32(0x12340004, w4);
+  ASSERT_EQUAL_32(0x12340005, w5);
+  ASSERT_EQUAL_32(0x12340006, w6);
+
+  ASSERT_EQUAL_FP64(123400.0, d0);
+  ASSERT_EQUAL_FP64(123401.0, d1);
+
+  ASSERT_EQUAL_FP32(123402.0, s2);
+
+  TEARDOWN();
+}
+
+
+// Mirror of push_queued: push a mixed set of X, W, D and S registers
+// conventionally, queue the pops in a PushPopQueue, pop them all at once with
+// PopQueued(), and check that every value round-trips.
+TEST(pop_queued) {
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  ASSERT(__ StackPointer().Is(csp));
+  __ Mov(jssp, __ StackPointer());
+  __ SetStackPointer(jssp);
+
+  MacroAssembler::PushPopQueue queue(&masm);
+
+  __ Mov(x0, 0x1234000000000000);
+  __ Mov(x1, 0x1234000100010001);
+  __ Mov(x2, 0x1234000200020002);
+  __ Mov(x3, 0x1234000300030003);
+  __ Mov(w4, 0x12340004);
+  __ Mov(w5, 0x12340005);
+  __ Mov(w6, 0x12340006);
+  __ Fmov(d0, 123400.0);
+  __ Fmov(d1, 123401.0);
+  __ Fmov(s2, 123402.0);
+
+  // Push registers conventionally.
+  __ Push(x0, x1, x2, x3);
+  __ Push(w4, w5, w6);
+  __ Push(d0, d1);
+  __ Push(s2);
+
+  // Queue up a pop.
+  queue.Queue(s2);
+
+  queue.Queue(d1);
+  queue.Queue(d0);
+
+  queue.Queue(w6);
+  queue.Queue(w5);
+  queue.Queue(w4);
+
+  queue.Queue(x3);
+  queue.Queue(x2);
+  queue.Queue(x1);
+  queue.Queue(x0);
+
+  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
+  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
+
+  // Actually pop them.
+  queue.PopQueued();
+
+  __ Mov(csp, __ StackPointer());
+  __ SetStackPointer(csp);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1234000000000000, x0);
+  ASSERT_EQUAL_64(0x1234000100010001, x1);
+  ASSERT_EQUAL_64(0x1234000200020002, x2);
+  ASSERT_EQUAL_64(0x1234000300030003, x3);
+
+  // The W pushes must have cleared the upper words, so compare as X values.
+  ASSERT_EQUAL_64(0x0000000012340004, x4);
+  ASSERT_EQUAL_64(0x0000000012340005, x5);
+  ASSERT_EQUAL_64(0x0000000012340006, x6);
+
+  ASSERT_EQUAL_FP64(123400.0, d0);
+  ASSERT_EQUAL_FP64(123401.0, d1);
+
+  ASSERT_EQUAL_FP32(123402.0, s2);
+
+  TEARDOWN();
+}
+
+
+// Test JumpIfBothSmi on all four pointer/smi pairings. Only the (smi, smi)
+// pair (x2, x3) should take the 'pass' path, so x4-x6 end as 0 and x7 as 1.
+TEST(jump_both_smi) {
+  INIT_V8();
+  SETUP();
+
+  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+  Label return1, return2, return3, done;
+
+  START();
+
+  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
+  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
+  __ Mov(x2, 0x1234567800000000UL);  // A smi.
+  __ Mov(x3, 0x8765432100000000UL);  // A smi.
+  __ Mov(x4, 0xdead);
+  __ Mov(x5, 0xdead);
+  __ Mov(x6, 0xdead);
+  __ Mov(x7, 0xdead);
+
+  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+  __ Bind(&return1);
+  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+  __ Bind(&return2);
+  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+  __ Bind(&return3);
+  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+  // Each outcome records 0 (fail) or 1 (pass) in x4-x7 and resumes the chain.
+  __ Bind(&cond_fail_00);
+  __ Mov(x4, 0);
+  __ B(&return1);
+  __ Bind(&cond_pass_00);
+  __ Mov(x4, 1);
+  __ B(&return1);
+
+  __ Bind(&cond_fail_01);
+  __ Mov(x5, 0);
+  __ B(&return2);
+  __ Bind(&cond_pass_01);
+  __ Mov(x5, 1);
+  __ B(&return2);
+
+  __ Bind(&cond_fail_10);
+  __ Mov(x6, 0);
+  __ B(&return3);
+  __ Bind(&cond_pass_10);
+  __ Mov(x6, 1);
+  __ B(&return3);
+
+  __ Bind(&cond_fail_11);
+  __ Mov(x7, 0);
+  __ B(&done);
+  __ Bind(&cond_pass_11);
+  __ Mov(x7, 1);
+
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+  ASSERT_EQUAL_64(0, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0, x6);
+  ASSERT_EQUAL_64(1, x7);
+
+  TEARDOWN();
+}
+
+
+// Test JumpIfEitherSmi on all four pointer/smi pairings. Every pair that
+// includes at least one smi should take the 'pass' path, so x4 ends as 0 and
+// x5-x7 as 1.
+TEST(jump_either_smi) {
+  INIT_V8();
+  SETUP();
+
+  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+  Label return1, return2, return3, done;
+
+  START();
+
+  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
+  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
+  __ Mov(x2, 0x1234567800000000UL);  // A smi.
+  __ Mov(x3, 0x8765432100000000UL);  // A smi.
+  __ Mov(x4, 0xdead);
+  __ Mov(x5, 0xdead);
+  __ Mov(x6, 0xdead);
+  __ Mov(x7, 0xdead);
+
+  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+  __ Bind(&return1);
+  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+  __ Bind(&return2);
+  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+  __ Bind(&return3);
+  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+  // Each outcome records 0 (fail) or 1 (pass) in x4-x7 and resumes the chain.
+  __ Bind(&cond_fail_00);
+  __ Mov(x4, 0);
+  __ B(&return1);
+  __ Bind(&cond_pass_00);
+  __ Mov(x4, 1);
+  __ B(&return1);
+
+  __ Bind(&cond_fail_01);
+  __ Mov(x5, 0);
+  __ B(&return2);
+  __ Bind(&cond_pass_01);
+  __ Mov(x5, 1);
+  __ B(&return2);
+
+  __ Bind(&cond_fail_10);
+  __ Mov(x6, 0);
+  __ B(&return3);
+  __ Bind(&cond_pass_10);
+  __ Mov(x6, 1);
+  __ B(&return3);
+
+  __ Bind(&cond_fail_11);
+  __ Mov(x7, 0);
+  __ B(&done);
+  __ Bind(&cond_pass_11);
+  __ Mov(x7, 1);
+
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+  ASSERT_EQUAL_64(0, x4);
+  ASSERT_EQUAL_64(1, x5);
+  ASSERT_EQUAL_64(1, x6);
+  ASSERT_EQUAL_64(1, x7);
+
+  TEARDOWN();
+}
+
+
+// Check that NoReg, NoFPReg and NoCPUReg all compare Is()-equal to each other
+// and all report IsNone().
+TEST(noreg) {
+  // This test doesn't generate any code, but it verifies some invariants
+  // related to NoReg.
+  CHECK(NoReg.Is(NoFPReg));
+  CHECK(NoFPReg.Is(NoReg));
+  CHECK(NoReg.Is(NoCPUReg));
+  CHECK(NoCPUReg.Is(NoReg));
+  CHECK(NoFPReg.Is(NoCPUReg));
+  CHECK(NoCPUReg.Is(NoFPReg));
+
+  CHECK(NoReg.IsNone());
+  CHECK(NoFPReg.IsNone());
+  CHECK(NoCPUReg.IsNone());
+}
+
+
+// Check IsValid(), IsValidRegister() and IsValidFPRegister() for integer and
+// FP registers, both through their concrete types and through CPURegister.
+TEST(isvalid) {
+  // This test doesn't generate any code, but it verifies some invariants
+  // related to IsValid().
+  CHECK(!NoReg.IsValid());
+  CHECK(!NoFPReg.IsValid());
+  CHECK(!NoCPUReg.IsValid());
+
+  CHECK(x0.IsValid());
+  CHECK(w0.IsValid());
+  CHECK(x30.IsValid());
+  CHECK(w30.IsValid());
+  CHECK(xzr.IsValid());
+  CHECK(wzr.IsValid());
+
+  CHECK(csp.IsValid());
+  CHECK(wcsp.IsValid());
+
+  CHECK(d0.IsValid());
+  CHECK(s0.IsValid());
+  CHECK(d31.IsValid());
+  CHECK(s31.IsValid());
+
+  CHECK(x0.IsValidRegister());
+  CHECK(w0.IsValidRegister());
+  CHECK(xzr.IsValidRegister());
+  CHECK(wzr.IsValidRegister());
+  CHECK(csp.IsValidRegister());
+  CHECK(wcsp.IsValidRegister());
+  CHECK(!x0.IsValidFPRegister());
+  CHECK(!w0.IsValidFPRegister());
+  CHECK(!xzr.IsValidFPRegister());
+  CHECK(!wzr.IsValidFPRegister());
+  CHECK(!csp.IsValidFPRegister());
+  CHECK(!wcsp.IsValidFPRegister());
+
+  CHECK(d0.IsValidFPRegister());
+  CHECK(s0.IsValidFPRegister());
+  CHECK(!d0.IsValidRegister());
+  CHECK(!s0.IsValidRegister());
+
+  // Test the same as before, but using CPURegister types. This shouldn't make
+  // any difference.
+  CHECK(static_cast<CPURegister>(x0).IsValid());
+  CHECK(static_cast<CPURegister>(w0).IsValid());
+  CHECK(static_cast<CPURegister>(x30).IsValid());
+  CHECK(static_cast<CPURegister>(w30).IsValid());
+  CHECK(static_cast<CPURegister>(xzr).IsValid());
+  CHECK(static_cast<CPURegister>(wzr).IsValid());
+
+  CHECK(static_cast<CPURegister>(csp).IsValid());
+  CHECK(static_cast<CPURegister>(wcsp).IsValid());
+
+  CHECK(static_cast<CPURegister>(d0).IsValid());
+  CHECK(static_cast<CPURegister>(s0).IsValid());
+  CHECK(static_cast<CPURegister>(d31).IsValid());
+  CHECK(static_cast<CPURegister>(s31).IsValid());
+
+  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
+  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
+  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
+  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
+  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
+  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
+  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
+
+  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
+  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
+  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
+}
+
+
+// CPURegList behaviour for X registers: IncludesAliasOf (including the W
+// aliases), IsEmpty, type(), and that PopHighestIndex/PopLowestIndex remove
+// members from the ends of the list.
+TEST(cpureglist_utils_x) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of X registers.
+  CPURegList test(x0, x1, x2, x3);
+
+  CHECK(test.IncludesAliasOf(x0));
+  CHECK(test.IncludesAliasOf(x1));
+  CHECK(test.IncludesAliasOf(x2));
+  CHECK(test.IncludesAliasOf(x3));
+  CHECK(test.IncludesAliasOf(w0));
+  CHECK(test.IncludesAliasOf(w1));
+  CHECK(test.IncludesAliasOf(w2));
+  CHECK(test.IncludesAliasOf(w3));
+
+  CHECK(!test.IncludesAliasOf(x4));
+  CHECK(!test.IncludesAliasOf(x30));
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(w4));
+  CHECK(!test.IncludesAliasOf(w30));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  CHECK(!test.IncludesAliasOf(d0));
+  CHECK(!test.IncludesAliasOf(d1));
+  CHECK(!test.IncludesAliasOf(d2));
+  CHECK(!test.IncludesAliasOf(d3));
+  CHECK(!test.IncludesAliasOf(s0));
+  CHECK(!test.IncludesAliasOf(s1));
+  CHECK(!test.IncludesAliasOf(s2));
+  CHECK(!test.IncludesAliasOf(s3));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == x0.type());
+
+  CHECK(test.PopHighestIndex().Is(x3));
+  CHECK(test.PopLowestIndex().Is(x0));
+
+  CHECK(test.IncludesAliasOf(x1));
+  CHECK(test.IncludesAliasOf(x2));
+  CHECK(test.IncludesAliasOf(w1));
+  CHECK(test.IncludesAliasOf(w2));
+  CHECK(!test.IncludesAliasOf(x0));
+  CHECK(!test.IncludesAliasOf(x3));
+  CHECK(!test.IncludesAliasOf(w0));
+  CHECK(!test.IncludesAliasOf(w3));
+
+  CHECK(test.PopHighestIndex().Is(x2));
+  CHECK(test.PopLowestIndex().Is(x1));
+
+  CHECK(!test.IncludesAliasOf(x1));
+  CHECK(!test.IncludesAliasOf(x2));
+  CHECK(!test.IncludesAliasOf(w1));
+  CHECK(!test.IncludesAliasOf(w2));
+
+  CHECK(test.IsEmpty());
+}
+
+
+// CPURegList behaviour for W registers: as cpureglist_utils_x, but the list
+// is built from W registers and aliases their X counterparts.
+TEST(cpureglist_utils_w) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of W registers.
+  CPURegList test(w10, w11, w12, w13);
+
+  CHECK(test.IncludesAliasOf(x10));
+  CHECK(test.IncludesAliasOf(x11));
+  CHECK(test.IncludesAliasOf(x12));
+  CHECK(test.IncludesAliasOf(x13));
+  CHECK(test.IncludesAliasOf(w10));
+  CHECK(test.IncludesAliasOf(w11));
+  CHECK(test.IncludesAliasOf(w12));
+  CHECK(test.IncludesAliasOf(w13));
+
+  CHECK(!test.IncludesAliasOf(x0));
+  CHECK(!test.IncludesAliasOf(x9));
+  CHECK(!test.IncludesAliasOf(x14));
+  CHECK(!test.IncludesAliasOf(x30));
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(w0));
+  CHECK(!test.IncludesAliasOf(w9));
+  CHECK(!test.IncludesAliasOf(w14));
+  CHECK(!test.IncludesAliasOf(w30));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  CHECK(!test.IncludesAliasOf(d10));
+  CHECK(!test.IncludesAliasOf(d11));
+  CHECK(!test.IncludesAliasOf(d12));
+  CHECK(!test.IncludesAliasOf(d13));
+  CHECK(!test.IncludesAliasOf(s10));
+  CHECK(!test.IncludesAliasOf(s11));
+  CHECK(!test.IncludesAliasOf(s12));
+  CHECK(!test.IncludesAliasOf(s13));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == w10.type());
+
+  CHECK(test.PopHighestIndex().Is(w13));
+  CHECK(test.PopLowestIndex().Is(w10));
+
+  CHECK(test.IncludesAliasOf(x11));
+  CHECK(test.IncludesAliasOf(x12));
+  CHECK(test.IncludesAliasOf(w11));
+  CHECK(test.IncludesAliasOf(w12));
+  CHECK(!test.IncludesAliasOf(x10));
+  CHECK(!test.IncludesAliasOf(x13));
+  CHECK(!test.IncludesAliasOf(w10));
+  CHECK(!test.IncludesAliasOf(w13));
+
+  CHECK(test.PopHighestIndex().Is(w12));
+  CHECK(test.PopLowestIndex().Is(w11));
+
+  CHECK(!test.IncludesAliasOf(x11));
+  CHECK(!test.IncludesAliasOf(x12));
+  CHECK(!test.IncludesAliasOf(w11));
+  CHECK(!test.IncludesAliasOf(w12));
+
+  CHECK(test.IsEmpty());
+}
+
+
+// CPURegList behaviour for D registers: membership aliases the S registers
+// but never the integer (X/W) registers.
+TEST(cpureglist_utils_d) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of D registers.
+  CPURegList test(d20, d21, d22, d23);
+
+  CHECK(test.IncludesAliasOf(d20));
+  CHECK(test.IncludesAliasOf(d21));
+  CHECK(test.IncludesAliasOf(d22));
+  CHECK(test.IncludesAliasOf(d23));
+  CHECK(test.IncludesAliasOf(s20));
+  CHECK(test.IncludesAliasOf(s21));
+  CHECK(test.IncludesAliasOf(s22));
+  CHECK(test.IncludesAliasOf(s23));
+
+  CHECK(!test.IncludesAliasOf(d0));
+  CHECK(!test.IncludesAliasOf(d19));
+  CHECK(!test.IncludesAliasOf(d24));
+  CHECK(!test.IncludesAliasOf(d31));
+  CHECK(!test.IncludesAliasOf(s0));
+  CHECK(!test.IncludesAliasOf(s19));
+  CHECK(!test.IncludesAliasOf(s24));
+  CHECK(!test.IncludesAliasOf(s31));
+
+  CHECK(!test.IncludesAliasOf(x20));
+  CHECK(!test.IncludesAliasOf(x21));
+  CHECK(!test.IncludesAliasOf(x22));
+  CHECK(!test.IncludesAliasOf(x23));
+  CHECK(!test.IncludesAliasOf(w20));
+  CHECK(!test.IncludesAliasOf(w21));
+  CHECK(!test.IncludesAliasOf(w22));
+  CHECK(!test.IncludesAliasOf(w23));
+
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == d20.type());
+
+  CHECK(test.PopHighestIndex().Is(d23));
+  CHECK(test.PopLowestIndex().Is(d20));
+
+  CHECK(test.IncludesAliasOf(d21));
+  CHECK(test.IncludesAliasOf(d22));
+  CHECK(test.IncludesAliasOf(s21));
+  CHECK(test.IncludesAliasOf(s22));
+  CHECK(!test.IncludesAliasOf(d20));
+  CHECK(!test.IncludesAliasOf(d23));
+  CHECK(!test.IncludesAliasOf(s20));
+  CHECK(!test.IncludesAliasOf(s23));
+
+  CHECK(test.PopHighestIndex().Is(d22));
+  CHECK(test.PopLowestIndex().Is(d21));
+
+  CHECK(!test.IncludesAliasOf(d21));
+  CHECK(!test.IncludesAliasOf(d22));
+  CHECK(!test.IncludesAliasOf(s21));
+  CHECK(!test.IncludesAliasOf(s22));
+
+  CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_s) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of S registers.
+ CPURegList test(s20, s21, s22, s23);
+
+ // The type and size mechanisms are already covered, so here we just test
+ // that lists of S registers alias individual D registers.
+
+ CHECK(test.IncludesAliasOf(d20));
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(d23));
+ CHECK(test.IncludesAliasOf(s20));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(test.IncludesAliasOf(s23));
+}
+
+
+TEST(cpureglist_utils_empty) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test an empty list.
+ // Empty lists can have type and size properties. Check that we can create
+ // them, and that they are empty.
+ CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
+ CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
+ CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
+ CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+
+ CHECK(reg32.PopLowestIndex().IsNone());
+ CHECK(reg64.PopLowestIndex().IsNone());
+ CHECK(fpreg32.PopLowestIndex().IsNone());
+ CHECK(fpreg64.PopLowestIndex().IsNone());
+
+ CHECK(reg32.PopHighestIndex().IsNone());
+ CHECK(reg64.PopHighestIndex().IsNone());
+ CHECK(fpreg32.PopHighestIndex().IsNone());
+ CHECK(fpreg64.PopHighestIndex().IsNone());
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+}
+
+
+TEST(printf) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+ RegisterDump before;
+
+ // Initialize x29 to the value of the stack pointer. We will use x29 as a
+ // temporary stack pointer later, and initializing it in this way allows the
+ // RegisterDump check to pass.
+ __ Mov(x29, __ StackPointer());
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ Fmov(d10, 42.0);
+
+ // Test with three arguments.
+ __ Mov(x10, 3);
+ __ Mov(x11, 40);
+ __ Mov(x12, 500);
+
+ // x8 and x9 are used by debug code in part of the macro assembler. However,
+ // Printf guarantees to preserve them (so we can use Printf in debug code),
+ // and we need to test that they are properly preserved. The above code
+ // shouldn't need to use them, but we initialize x8 and x9 last to be on the
+ // safe side. This test still assumes that none of the code from
+ // before->Dump() to the end of the test can clobber x8 or x9, so where
+ // possible we use the Assembler directly to be safe.
+ __ orr(x8, xzr, 0x8888888888888888);
+ __ orr(x9, xzr, 0x9999999999999999);
+
+ // Check that we don't clobber any registers, except those that we explicitly
+ // write results into.
+ before.Dump(&masm);
+
+ __ Printf(test_plain_string); // NOLINT(runtime/printf)
+ __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Printf("d0: %f\n", d0);
+ __ Printf("Test %%s: %s\n", x2);
+ __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Printf("%g\n", d10);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+ __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
+ __ mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
+
+ END();
+ RUN();
+
+ // We cannot easily test the output of the Printf sequences, and because
+ // Printf preserves all registers by default, we can't look at the number of
+ // bytes that were printed. However, the printf_no_preserve test should check
+ // that, and here we just test that we didn't clobber any registers.
+ ASSERT_EQUAL_REGISTERS(before);
+
+ TEARDOWN();
+}
+
+
+TEST(printf_no_preserve) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+
+ __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf)
+ __ Mov(x19, x0);
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+ __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Mov(x20, x0);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+ __ PrintfNoPreserve("d0: %f\n", d0);
+ __ Mov(x21, x0);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+ __ PrintfNoPreserve("Test %%s: %s\n", x2);
+ __ Mov(x22, x0);
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Mov(x23, x0);
+
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+ __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Mov(x24, x0);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Mov(x25, x0);
+
+ __ Fmov(d10, 42.0);
+ __ PrintfNoPreserve("%g\n", d10);
+ __ Mov(x26, x0);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ Mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+
+ __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
+ old_stack_pointer);
+ __ Mov(x27, x0);
+
+ __ Mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ // Test with three arguments.
+ __ Mov(x3, 3);
+ __ Mov(x4, 40);
+ __ Mov(x5, 500);
+ __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
+ __ Mov(x28, x0);
+
+ END();
+ RUN();
+
+ // We cannot easily test the exact output of the Printf sequences, but we can
+ // use the return code to check that the string length was correct.
+
+ // Printf with no arguments.
+ ASSERT_EQUAL_64(strlen(test_plain_string), x19);
+ // x0: 1234, x1: 0x00001234
+ ASSERT_EQUAL_64(25, x20);
+ // d0: 1.234000
+ ASSERT_EQUAL_64(13, x21);
+ // Test %s: 'This is a substring.'
+ ASSERT_EQUAL_64(32, x22);
+ // w3(uint32): 4294967295
+ // w4(int32): -1
+ // x5(uint64): 18446744073709551615
+ // x6(int64): -1
+ ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
+ // %f: 1.234000
+ // %g: 2.345
+ // %e: 3.456000e+00
+ // %E: 4.567000E+00
+ ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
+ // 0x89abcdef, 0x0123456789abcdef
+ ASSERT_EQUAL_64(31, x25);
+ // 42
+ ASSERT_EQUAL_64(3, x26);
+ // old_stack_pointer: 0x00007fb037ae2370
+ // Note: This is an example value, but the field width is fixed here so the
+ // string length is still predictable.
+ ASSERT_EQUAL_64(38, x27);
+ // 3=3, 4=40, 5=500
+ ASSERT_EQUAL_64(17, x28);
+
+ TEARDOWN();
+}
+
+
+// This is a V8-specific test.
+static void CopyFieldsHelper(CPURegList temps) {
+ static const uint64_t kLiteralBase = 0x0100001000100101UL;
+ static const uint64_t src[] = {kLiteralBase * 1,
+ kLiteralBase * 2,
+ kLiteralBase * 3,
+ kLiteralBase * 4,
+ kLiteralBase * 5,
+ kLiteralBase * 6,
+ kLiteralBase * 7,
+ kLiteralBase * 8,
+ kLiteralBase * 9,
+ kLiteralBase * 10,
+ kLiteralBase * 11};
+ static const uint64_t src_tagged =
+ reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
+
+ static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
+ uint64_t* dst[kTestCount];
+ uint64_t dst_tagged[kTestCount];
+
+ // The first test will be to copy 0 fields. The destination (and source)
+ // should not be accessed in any way.
+ dst[0] = NULL;
+ dst_tagged[0] = kHeapObjectTag;
+
+ // Allocate memory for each other test. Each test <n> will have <n> fields.
+ // This is intended to exercise as many paths in CopyFields as possible.
+ for (unsigned i = 1; i < kTestCount; i++) {
+ dst[i] = new uint64_t[i];
+ memset(dst[i], 0, i * sizeof(kLiteralBase));
+ dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
+ }
+
+ SETUP();
+ START();
+
+ __ Mov(x0, dst_tagged[0]);
+ __ Mov(x1, 0);
+ __ CopyFields(x0, x1, temps, 0);
+ for (unsigned i = 1; i < kTestCount; i++) {
+ __ Mov(x0, dst_tagged[i]);
+ __ Mov(x1, src_tagged);
+ __ CopyFields(x0, x1, temps, i);
+ }
+
+ END();
+ RUN();
+ TEARDOWN();
+
+ for (unsigned i = 1; i < kTestCount; i++) {
+ for (unsigned j = 0; j < i; j++) {
+ CHECK(src[j] == dst[i][j]);
+ }
+ delete [] dst[i];
+ }
+}
+
+
+// This is a V8-specific test.
+TEST(copyfields) {
+ INIT_V8();
+ CopyFieldsHelper(CPURegList(x10));
+ CopyFieldsHelper(CPURegList(x10, x11));
+ CopyFieldsHelper(CPURegList(x10, x11, x12));
+ CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
+}
+
+
+static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
+ SETUP();
+
+ START();
+ Label end, slow;
+ __ Mov(x2, 0xc001c0de);
+ __ Mov(x1, value);
+ __ SmiTag(x1);
+ __ SmiAbs(x1, &slow);
+ __ SmiUntag(x1);
+ __ B(&end);
+
+ __ Bind(&slow);
+ __ Mov(x2, 0xbad);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ if (must_fail) {
+ // We tested an invalid conversion. The code must have jump on slow.
+ ASSERT_EQUAL_64(0xbad, x2);
+ } else {
+ // The conversion is valid, check the result.
+ int32_t result = (value >= 0) ? value : -value;
+ ASSERT_EQUAL_64(result, x1);
+
+ // Check that we didn't jump on slow.
+ ASSERT_EQUAL_64(0xc001c0de, x2);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(smi_abs) {
+ INIT_V8();
+ // Simple and edge cases.
+ DoSmiAbsTest(0);
+ DoSmiAbsTest(0x12345);
+ DoSmiAbsTest(0x40000000);
+ DoSmiAbsTest(0x7fffffff);
+ DoSmiAbsTest(-1);
+ DoSmiAbsTest(-12345);
+ DoSmiAbsTest(0x80000001);
+
+ // Check that the most negative SMI is detected.
+ DoSmiAbsTest(0x80000000, true);
+}
+
+
+TEST(blr_lr) {
+ // A simple test to check that the simulator correcty handle "blr lr".
+ INIT_V8();
+ SETUP();
+
+ START();
+ Label target;
+ Label end;
+
+ __ Mov(x0, 0x0);
+ __ Adr(lr, &target);
+
+ __ Blr(lr);
+ __ Mov(x0, 0xdeadbeef);
+ __ B(&end);
+
+ __ Bind(&target);
+ __ Mov(x0, 0xc001c0de);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xc001c0de, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(barriers) {
+ // Generate all supported barriers, this is just a smoke test
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // DMB
+ __ Dmb(FullSystem, BarrierAll);
+ __ Dmb(FullSystem, BarrierReads);
+ __ Dmb(FullSystem, BarrierWrites);
+ __ Dmb(FullSystem, BarrierOther);
+
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Dmb(InnerShareable, BarrierReads);
+ __ Dmb(InnerShareable, BarrierWrites);
+ __ Dmb(InnerShareable, BarrierOther);
+
+ __ Dmb(NonShareable, BarrierAll);
+ __ Dmb(NonShareable, BarrierReads);
+ __ Dmb(NonShareable, BarrierWrites);
+ __ Dmb(NonShareable, BarrierOther);
+
+ __ Dmb(OuterShareable, BarrierAll);
+ __ Dmb(OuterShareable, BarrierReads);
+ __ Dmb(OuterShareable, BarrierWrites);
+ __ Dmb(OuterShareable, BarrierOther);
+
+ // DSB
+ __ Dsb(FullSystem, BarrierAll);
+ __ Dsb(FullSystem, BarrierReads);
+ __ Dsb(FullSystem, BarrierWrites);
+ __ Dsb(FullSystem, BarrierOther);
+
+ __ Dsb(InnerShareable, BarrierAll);
+ __ Dsb(InnerShareable, BarrierReads);
+ __ Dsb(InnerShareable, BarrierWrites);
+ __ Dsb(InnerShareable, BarrierOther);
+
+ __ Dsb(NonShareable, BarrierAll);
+ __ Dsb(NonShareable, BarrierReads);
+ __ Dsb(NonShareable, BarrierWrites);
+ __ Dsb(NonShareable, BarrierOther);
+
+ __ Dsb(OuterShareable, BarrierAll);
+ __ Dsb(OuterShareable, BarrierReads);
+ __ Dsb(OuterShareable, BarrierWrites);
+ __ Dsb(OuterShareable, BarrierOther);
+
+ // ISB
+ __ Isb();
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+TEST(process_nan_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(d0, sn);
+ __ Fmov(d10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(d1, d0);
+ __ Fabs(d2, d0);
+ __ Fneg(d3, d0);
+ // - Quiet NaN
+ __ Fmov(d11, d10);
+ __ Fabs(d12, d10);
+ __ Fneg(d13, d10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(d4, d0);
+ __ Frinta(d5, d0);
+ __ Frintn(d6, d0);
+ __ Frintz(d7, d0);
+ // - Quiet NaN
+ __ Fsqrt(d14, d10);
+ __ Frinta(d15, d10);
+ __ Frintn(d16, d10);
+ __ Frintz(d17, d10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint64_t qn_raw = double_to_rawbits(qn);
+ uint64_t sn_raw = double_to_rawbits(sn);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn, d1);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn, d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn_proc, d4);
+ ASSERT_EQUAL_FP64(sn_proc, d5);
+ ASSERT_EQUAL_FP64(sn_proc, d6);
+ ASSERT_EQUAL_FP64(sn_proc, d7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn_proc, d14);
+ ASSERT_EQUAL_FP64(qn_proc, d15);
+ ASSERT_EQUAL_FP64(qn_proc, d16);
+ ASSERT_EQUAL_FP64(qn_proc, d17);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nan_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float qn = rawbits_to_float(0x7fea1111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(s0, sn);
+ __ Fmov(s10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(s1, s0);
+ __ Fabs(s2, s0);
+ __ Fneg(s3, s0);
+ // - Quiet NaN
+ __ Fmov(s11, s10);
+ __ Fabs(s12, s10);
+ __ Fneg(s13, s10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(s4, s0);
+ __ Frinta(s5, s0);
+ __ Frintn(s6, s0);
+ __ Frintz(s7, s0);
+ // - Quiet NaN
+ __ Fsqrt(s14, s10);
+ __ Frinta(s15, s10);
+ __ Frintn(s16, s10);
+ __ Frintz(s17, s10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint32_t qn_raw = float_to_rawbits(qn);
+ uint32_t sn_raw = float_to_rawbits(sn);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn, s1);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn, s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn_proc, s4);
+ ASSERT_EQUAL_FP32(sn_proc, s5);
+ ASSERT_EQUAL_FP32(sn_proc, s6);
+ ASSERT_EQUAL_FP32(sn_proc, s7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn_proc, s14);
+ ASSERT_EQUAL_FP32(qn_proc, s15);
+ ASSERT_EQUAL_FP32(qn_proc, s16);
+ ASSERT_EQUAL_FP32(qn_proc, s17);
+
+ TEARDOWN();
+}
+
+
+static void ProcessNaNsHelper(double n, double m, double expected) {
+ ASSERT(std::isnan(n) || std::isnan(m));
+ ASSERT(isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+
+ __ Fadd(d2, d0, d1);
+ __ Fsub(d3, d0, d1);
+ __ Fmul(d4, d0, d1);
+ __ Fdiv(d5, d0, d1);
+ __ Fmax(d6, d0, d1);
+ __ Fmin(d7, d0, d1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(expected, d2);
+ ASSERT_EQUAL_FP64(expected, d3);
+ ASSERT_EQUAL_FP64(expected, d4);
+ ASSERT_EQUAL_FP64(expected, d5);
+ ASSERT_EQUAL_FP64(expected, d6);
+ ASSERT_EQUAL_FP64(expected, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double sm_proc = rawbits_to_double(0x7ffd555522222222);
+ double qn_proc = qn;
+ double qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+static void ProcessNaNsHelper(float n, float m, float expected) {
+ ASSERT(std::isnan(n) || std::isnan(m));
+ ASSERT(isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+
+ __ Fadd(s2, s0, s1);
+ __ Fsub(s3, s0, s1);
+ __ Fmul(s4, s0, s1);
+ __ Fdiv(s5, s0, s1);
+ __ Fmax(s6, s0, s1);
+ __ Fmin(s7, s0, s1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(expected, s2);
+ ASSERT_EQUAL_FP32(expected, s3);
+ ASSERT_EQUAL_FP32(expected, s4);
+ ASSERT_EQUAL_FP32(expected, s5);
+ ASSERT_EQUAL_FP32(expected, s6);
+ ASSERT_EQUAL_FP32(expected, s7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float sm_proc = rawbits_to_float(0x7fd52222);
+ float qn_proc = qn;
+ float qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+static void DefaultNaNHelper(float n, float m, float a) {
+ ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+
+ bool test_1op = std::isnan(n);
+ bool test_2op = std::isnan(n) || std::isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(s10, s0);
+ __ Fabs(s11, s0);
+ __ Fneg(s12, s0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(s13, s0);
+ __ Frinta(s14, s0);
+ __ Frintn(s15, s0);
+ __ Frintz(s16, s0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(d17, s0);
+ }
+
+ if (test_2op) {
+ __ Fadd(s18, s0, s1);
+ __ Fsub(s19, s0, s1);
+ __ Fmul(s20, s0, s1);
+ __ Fdiv(s21, s0, s1);
+ __ Fmax(s22, s0, s1);
+ __ Fmin(s23, s0, s1);
+ }
+
+ __ Fmadd(s24, s0, s1, s2);
+ __ Fmsub(s25, s0, s1, s2);
+ __ Fnmadd(s26, s0, s1, s2);
+ __ Fnmsub(s27, s0, s1, s2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint32_t n_raw = float_to_rawbits(n);
+ ASSERT_EQUAL_FP32(n, s10);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
+ }
+
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_float) {
+ INIT_V8();
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float sa = rawbits_to_float(0x7f95aaaa);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ float qa = rawbits_to_float(0x7feaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, sm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, 0.0f);
+ DefaultNaNHelper(0.0f, sm, sa);
+ DefaultNaNHelper(sn, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, qm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, 0.0f);
+ DefaultNaNHelper(0.0f, qm, qa);
+ DefaultNaNHelper(qn, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
+static void DefaultNaNHelper(double n, double m, double a) {
+ ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+
+ bool test_1op = std::isnan(n);
+ bool test_2op = std::isnan(n) || std::isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(d10, d0);
+ __ Fabs(d11, d0);
+ __ Fneg(d12, d0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(d13, d0);
+ __ Frinta(d14, d0);
+ __ Frintn(d15, d0);
+ __ Frintz(d16, d0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(s17, d0);
+ }
+
+ if (test_2op) {
+ __ Fadd(d18, d0, d1);
+ __ Fsub(d19, d0, d1);
+ __ Fmul(d20, d0, d1);
+ __ Fdiv(d21, d0, d1);
+ __ Fmax(d22, d0, d1);
+ __ Fmin(d23, d0, d1);
+ }
+
+ __ Fmadd(d24, d0, d1, d2);
+ __ Fmsub(d25, d0, d1, d2);
+ __ Fnmadd(d26, d0, d1, d2);
+ __ Fnmsub(d27, d0, d1, d2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint64_t n_raw = double_to_rawbits(n);
+ ASSERT_EQUAL_FP64(n, d10);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
+ }
+
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_double) {
+ INIT_V8();
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, sm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, sa);
+ DefaultNaNHelper(sn, sm, 0.0);
+ DefaultNaNHelper(0.0, sm, sa);
+ DefaultNaNHelper(sn, 0.0, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, qm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, qa);
+ DefaultNaNHelper(qn, qm, 0.0);
+ DefaultNaNHelper(0.0, qm, qa);
+ DefaultNaNHelper(qn, 0.0, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
+TEST(call_no_relocation) {
+ Address call_start;
+ Address return_address;
+
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label function;
+ Label test;
+
+ __ B(&test);
+
+ __ Bind(&function);
+ __ Mov(x0, 0x1);
+ __ Ret();
+
+ __ Bind(&test);
+ __ Mov(x0, 0x0);
+ __ Push(lr, xzr);
+ {
+ Assembler::BlockConstPoolScope scope(&masm);
+ call_start = buf + __ pc_offset();
+ __ Call(buf + function.pos(), RelocInfo::NONE64);
+ return_address = buf + __ pc_offset();
+ }
+ __ Pop(xzr, lr);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+
+ // The return_address_from_call_start function doesn't currently encounter any
+ // non-relocatable sequences, so we check it here to make sure it works.
+ // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
+ // non-relocatable calls at all.
+ CHECK(return_address ==
+ Assembler::return_address_from_call_start(call_start));
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperX(int64_t value) {
+ int64_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, value);
+
+ if (value != kXMinInt) {
+ expected = labs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, &fail);
+ __ Abs(x12, x1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(x13, x1, NULL, &done);
+ } else {
+ // labs is undefined for kXMinInt but our implementation in the
+ // MacroAssembler will return kXMinInt in such a case.
+ expected = kXMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, NULL, &fail);
+ __ Abs(x12, x1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(x13, x1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(x0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(value, x1);
+ ASSERT_EQUAL_64(expected, x10);
+ ASSERT_EQUAL_64(expected, x11);
+ ASSERT_EQUAL_64(expected, x12);
+ ASSERT_EQUAL_64(expected, x13);
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperW(int32_t value) {
+ int32_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(w0, 0);
+ // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
+ // Once it is fixed, we should remove the cast.
+ __ Mov(w1, static_cast<uint32_t>(value));
+
+ if (value != kWMinInt) {
+ expected = abs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, &fail);
+ __ Abs(w12, w1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(w13, w1, NULL, &done);
+ } else {
+ // abs is undefined for kWMinInt but our implementation in the
+ // MacroAssembler will return kWMinInt in such a case.
+ expected = kWMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, NULL, &fail);
+ __ Abs(w12, w1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(w13, w1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(w0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_32(0, w0);
+ ASSERT_EQUAL_32(value, w1);
+ ASSERT_EQUAL_32(expected, w10);
+ ASSERT_EQUAL_32(expected, w11);
+ ASSERT_EQUAL_32(expected, w12);
+ ASSERT_EQUAL_32(expected, w13);
+
+ TEARDOWN();
+}
+
+
+TEST(abs) {
+ INIT_V8();
+ AbsHelperX(0);
+ AbsHelperX(42);
+ AbsHelperX(-42);
+ AbsHelperX(kXMinInt);
+ AbsHelperX(kXMaxInt);
+
+ AbsHelperW(0);
+ AbsHelperW(42);
+ AbsHelperW(-42);
+ AbsHelperW(kWMinInt);
+ AbsHelperW(kWMaxInt);
+}
+
+
+TEST(pool_size) {
+ INIT_V8();
+ SETUP();
+
+ // This test does not execute any code. It only tests that the size of the
+ // pools is read correctly from the RelocInfo.
+
+ Label exit;
+ __ b(&exit);
+
+ const unsigned constant_pool_size = 312;
+ const unsigned veneer_pool_size = 184;
+
+ __ RecordConstPool(constant_pool_size);
+ for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
+ __ dc32(0);
+ }
+
+ __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
+ for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
+ __ nop();
+ }
+
+ __ bind(&exit);
+
+ Heap* heap = isolate->heap();
+ CodeDesc desc;
+ Object* code_object = NULL;
+ Code* code;
+ masm.GetCode(&desc);
+ MaybeObject* maybe_code = heap->CreateCode(desc, 0, masm.CodeObject());
+ maybe_code->ToObject(&code_object);
+ code = Code::cast(code_object);
+
+ unsigned pool_count = 0;
+ int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (RelocInfo::IsConstPool(info->rmode())) {
+ ASSERT(info->data() == constant_pool_size);
+ ++pool_count;
+ }
+ if (RelocInfo::IsVeneerPool(info->rmode())) {
+ ASSERT(info->data() == veneer_pool_size);
+ ++pool_count;
+ }
+ }
+
+ ASSERT(pool_count == 2);
+
+ TEARDOWN();
+}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 4fa5ffecb..446cec6ad 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -99,15 +99,15 @@ TEST(AssemblerX64StackOperations) {
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
// incorrect stack frames when debugging this function (which has them).
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
- __ push(arg2); // Value at (rbp - 8)
- __ push(arg2); // Value at (rbp - 16)
- __ push(arg1); // Value at (rbp - 24)
- __ pop(rax);
- __ pop(rax);
- __ pop(rax);
- __ pop(rbp);
+ __ pushq(arg2); // Value at (rbp - 8)
+ __ pushq(arg2); // Value at (rbp - 16)
+ __ pushq(arg1); // Value at (rbp - 24)
+ __ popq(rax);
+ __ popq(rax);
+ __ popq(rax);
+ __ popq(rbp);
__ nop();
__ ret(0);
@@ -153,7 +153,7 @@ TEST(AssemblerX64ImulOperation) {
// Assemble a simple function that multiplies arguments returning the high
// word.
__ movq(rax, arg2);
- __ imul(arg1);
+ __ imulq(arg1);
__ movq(rax, rdx);
__ ret(0);
@@ -330,19 +330,19 @@ TEST(AssemblerX64MemoryOperands) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
- __ push(arg2); // Value at (rbp - 8)
- __ push(arg2); // Value at (rbp - 16)
- __ push(arg1); // Value at (rbp - 24)
+ __ pushq(arg2); // Value at (rbp - 8)
+ __ pushq(arg2); // Value at (rbp - 16)
+ __ pushq(arg1); // Value at (rbp - 24)
const int kStackElementSize = 8;
__ movq(rax, Operand(rbp, -3 * kStackElementSize));
- __ pop(arg2);
- __ pop(arg2);
- __ pop(arg2);
- __ pop(rbp);
+ __ popq(arg2);
+ __ popq(arg2);
+ __ popq(arg2);
+ __ popq(rbp);
__ nop();
__ ret(0);
@@ -364,7 +364,7 @@ TEST(AssemblerX64ControlFlow) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 1 and returns it.
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
__ movq(rax, arg1);
@@ -372,7 +372,7 @@ TEST(AssemblerX64ControlFlow) {
__ jmp(&target);
__ movq(rax, arg2);
__ bind(&target);
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
CodeDesc desc;
@@ -496,11 +496,11 @@ TEST(AssemblerMultiByteNop) {
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
- __ push(rbx);
- __ push(rcx);
- __ push(rdx);
- __ push(rdi);
- __ push(rsi);
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ pushq(rdx);
+ __ pushq(rdi);
+ __ pushq(rsi);
__ movq(rax, Immediate(1));
__ movq(rbx, Immediate(2));
__ movq(rcx, Immediate(3));
@@ -527,19 +527,19 @@ TEST(AssemblerMultiByteNop) {
__ cmpq(rsi, Immediate(6));
__ j(not_equal, &fail);
__ movq(rax, Immediate(42));
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rsi);
+ __ popq(rdi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
__ bind(&fail);
__ movq(rax, Immediate(13));
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rsi);
+ __ popq(rdi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
CodeDesc desc;
@@ -571,14 +571,14 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Assembler assm(isolate, buffer, sizeof(buffer));
// Remove return address from the stack for fix stack frame alignment.
- __ pop(rcx);
+ __ popq(rcx);
// Store input vector on the stack.
for (int i = 0; i < ELEMENT_COUNT; i++) {
__ movl(rax, Immediate(vec->Get(i)->Int32Value()));
__ shl(rax, Immediate(0x20));
- __ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
- __ push(rax);
+ __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ pushq(rax);
}
// Read vector into a xmm register.
@@ -590,7 +590,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Remove unused data from the stack.
__ addq(rsp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
// Restore return address.
- __ push(rcx);
+ __ pushq(rcx);
__ ret(0);
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
new file mode 100644
index 000000000..eba956c85
--- /dev/null
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -0,0 +1,276 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "atomicops.h"
+
+using namespace v8::internal;
+
+
+#define CHECK_EQU(v1, v2) \
+ CHECK_EQ(static_cast<int64_t>(v1), static_cast<int64_t>(v2))
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+ // For now, we just test the single-threaded execution.
+
+ // Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
+ // go outside the expected address bounds. This is to test that the
+ // 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
+ // machines.
+ struct {
+ AtomicType prev_word;
+ AtomicType count;
+ AtomicType next_word;
+ } s;
+
+ AtomicType prev_word_value, next_word_value;
+ memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+ memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+ s.prev_word = prev_word_value;
+ s.count = 0;
+ s.next_word = next_word_value;
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 1), 1);
+ CHECK_EQU(s.count, 1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 2), 3);
+ CHECK_EQU(s.count, 3);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 3), 6);
+ CHECK_EQU(s.count, 6);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -3), 3);
+ CHECK_EQU(s.count, 3);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -2), 1);
+ CHECK_EQU(s.count, 1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), 0);
+ CHECK_EQU(s.count, 0);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), -1);
+ CHECK_EQU(s.count, -1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -4), -5);
+ CHECK_EQU(s.count, -5);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 5), 0);
+ CHECK_EQU(s.count, 0);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+}
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+ AtomicType value = 0;
+ AtomicType prev = NoBarrier_CompareAndSwap(&value, 0, 1);
+ CHECK_EQU(1, value);
+ CHECK_EQU(0, prev);
+
+ // Use a test value that has non-zero bits in both halves, for testing
+ // the 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val =
+ (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ prev = NoBarrier_CompareAndSwap(&value, 0, 5);
+ CHECK_EQU(k_test_val, value);
+ CHECK_EQU(k_test_val, prev);
+
+ value = k_test_val;
+ prev = NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ CHECK_EQU(5, value);
+ CHECK_EQU(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+ AtomicType value = 0;
+ AtomicType new_value = NoBarrier_AtomicExchange(&value, 1);
+ CHECK_EQU(1, value);
+ CHECK_EQU(0, new_value);
+
+ // Use a test value that has non-zero bits in both halves, for testing
+ // the 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val =
+ (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ new_value = NoBarrier_AtomicExchange(&value, k_test_val);
+ CHECK_EQU(k_test_val, value);
+ CHECK_EQU(k_test_val, new_value);
+
+ value = k_test_val;
+ new_value = NoBarrier_AtomicExchange(&value, 5);
+ CHECK_EQU(5, value);
+ CHECK_EQU(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+ // Test at rollover boundary between int_max and int_min.
+ AtomicType test_val =
+ static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 1);
+ AtomicType value = -1 ^ test_val;
+ AtomicType new_value = NoBarrier_AtomicIncrement(&value, 1);
+ CHECK_EQU(test_val, value);
+ CHECK_EQU(value, new_value);
+
+ NoBarrier_AtomicIncrement(&value, -1);
+ CHECK_EQU(-1 ^ test_val, value);
+
+ // Test at 32-bit boundary for 64-bit atomic type.
+ test_val = static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) / 2);
+ value = test_val - 1;
+ new_value = NoBarrier_AtomicIncrement(&value, 1);
+ CHECK_EQU(test_val, value);
+ CHECK_EQU(value, new_value);
+
+ NoBarrier_AtomicIncrement(&value, -1);
+ CHECK_EQU(test_val - 1, value);
+}
+
+
+// Return an AtomicType with the value 0xa5a5a5..
+template <class AtomicType>
+static AtomicType TestFillValue() {
+ AtomicType val = 0;
+ memset(&val, 0xa5, sizeof(AtomicType));
+ return val;
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestStore() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ NoBarrier_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ NoBarrier_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+
+ Acquire_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ Acquire_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+
+ Release_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ Release_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestLoad() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ value = kVal1;
+ CHECK_EQU(kVal1, NoBarrier_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, NoBarrier_Load(&value));
+
+ value = kVal1;
+ CHECK_EQU(kVal1, Acquire_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, Acquire_Load(&value));
+
+ value = kVal1;
+ CHECK_EQU(kVal1, Release_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, Release_Load(&value));
+}
+
+
+TEST(AtomicIncrement) {
+ TestAtomicIncrement<Atomic32>();
+ TestAtomicIncrement<AtomicWord>();
+}
+
+
+TEST(CompareAndSwap) {
+ TestCompareAndSwap<Atomic32>();
+ TestCompareAndSwap<AtomicWord>();
+}
+
+
+TEST(AtomicExchange) {
+ TestAtomicExchange<Atomic32>();
+ TestAtomicExchange<AtomicWord>();
+}
+
+
+TEST(AtomicIncrementBounds) {
+ TestAtomicIncrementBounds<Atomic32>();
+ TestAtomicIncrementBounds<AtomicWord>();
+}
+
+
+TEST(Store) {
+ TestStore<Atomic32>();
+ TestStore<AtomicWord>();
+}
+
+
+TEST(Load) {
+ TestLoad<Atomic32>();
+ TestLoad<AtomicWord>();
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
new file mode 100644
index 000000000..7ddefdde1
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "simulator.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size = 2 * Assembler::kMinimalBufferSize;
+ byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
+
+ byte* start = stub.GetCode(isolate)->instruction_start();
+ Label done;
+
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ // Push the double argument.
+ __ Push(d0);
+ __ Mov(source_reg, jssp);
+
+ MacroAssembler::PushPopQueue queue(&masm);
+
+  // Save registers to make sure they don't get clobbered.
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 0;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ queue.Queue(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+ // Re-push the double argument.
+ queue.Queue(d0);
+
+ queue.PushQueued();
+
+ // Call through to the actual stub
+ if (inline_fastpath) {
+ __ Ldr(d0, MemOperand(source_reg));
+ __ TryConvertDoubleToInt64(destination_reg, d0, &done);
+ if (destination_reg.is(source_reg)) {
+ // Restore clobbered source_reg.
+ __ add(source_reg, jssp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Drop(1, kDoubleSize);
+
+  // Make sure no registers have been unexpectedly clobbered.
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Pop(ip0);
+ __ cmp(reg, ip0);
+ __ Assert(eq, kRegisterWasClobbered);
+ }
+ }
+
+ __ Drop(1, kDoubleSize);
+
+ if (!destination_reg.is(x0))
+ __ Mov(x0, destination_reg);
+
+ // Restore callee save registers.
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CPU::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(from),
+ Simulator::CallArgument::End()
+ };
+ return Simulator::current(Isolate::Current())->CallInt64(
+ FUNCTION_ADDR(func), args);
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // code, the compiler optimizes it away because it's all constant, but does it
+ // wrong, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {jssp, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
+ x10, x11, x12, x13, x14, x15, x18, x19, x20,
+ x21, x22, x23, x24};
+ Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
+ x24};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 6b3a12ccc..348b21aca 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -57,11 +57,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
DoubleToIStub stub(source_reg, destination_reg, offset, true);
byte* start = stub.GetCode(isolate)->instruction_start();
- __ push(rbx);
- __ push(rcx);
- __ push(rdx);
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ pushq(rdx);
+ __ pushq(rsi);
+ __ pushq(rdi);
if (!source_reg.is(rsp)) {
// The argument we pass to the stub is not a heap number, but instead
@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// registers.
int double_argument_slot =
(Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
- __ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset));
+ __ leaq(source_reg, MemOperand(rsp, -double_argument_slot - offset));
}
// Save registers make sure they don't get clobbered.
@@ -78,7 +78,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
- __ push(reg);
+ __ pushq(reg);
}
}
@@ -103,11 +103,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ movq(rax, destination_reg);
- __ pop(rdi);
- __ pop(rsi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index db00e9ac5..999febf77 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -49,6 +49,9 @@ int STDCALL ConvertDToICVersion(double d) {
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
HeapNumber::kExponentBias);
+ if (exponent < 0) {
+ return 0;
+ }
uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
int result = 0;
uint32_t max_exponent =
@@ -113,10 +116,27 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
-
- RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(4.94065645841e-324, 0);
+ RunOneTruncationTest(-4.94065645841e-324, 0);
+
+ RunOneTruncationTest(0.9999999999999999, 0);
+ RunOneTruncationTest(-0.9999999999999999, 0);
+ RunOneTruncationTest(4294967296.0, 0);
+ RunOneTruncationTest(-4294967296.0, 0);
+ RunOneTruncationTest(9223372036854775000.0, 4294966272.0);
+ RunOneTruncationTest(-9223372036854775000.0, -4294966272.0);
+ RunOneTruncationTest(4.5036e+15, 372629504);
RunOneTruncationTest(-4.5036e+15, -372629504);
+ RunOneTruncationTest(287524199.5377777, 0x11234567);
+ RunOneTruncationTest(-287524199.5377777, -0x11234567);
+ RunOneTruncationTest(2300193596.302222, 2300193596.0);
+ RunOneTruncationTest(-2300193596.302222, -2300193596.0);
+ RunOneTruncationTest(4600387192.604444, 305419896);
+ RunOneTruncationTest(-4600387192.604444, -305419896);
+ RunOneTruncationTest(4823855600872397.0, 1737075661);
+ RunOneTruncationTest(-4823855600872397.0, -1737075661);
+
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
@@ -134,10 +154,19 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+ RunOneTruncationTest(2147483647.0, 2147483647.0);
+ RunOneTruncationTest(-2147483648.0, -2147483648.0);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(1.9342813113834065e+25, 2147483648.0);
+ RunOneTruncationTest(-1.9342813113834065e+25, 2147483648.0);
+
+ RunOneTruncationTest(3.868562622766813e+25, 0);
+ RunOneTruncationTest(-3.868562622766813e+25, 0);
+ RunOneTruncationTest(1.7976931348623157e+308, 0);
+ RunOneTruncationTest(-1.7976931348623157e+308, 0);
}
#undef NaN
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index ae414d784..6540c5d28 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -51,7 +51,7 @@ static void SetGlobalProperty(const char* name, Object* value) {
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object());
Runtime::SetObjectProperty(isolate, global, internalized_name, object, NONE,
- kNonStrictMode);
+ SLOPPY);
}
@@ -66,8 +66,7 @@ static Handle<JSFunction> Compile(const char* source) {
0,
false,
Handle<Context>(isolate->native_context()),
- NULL, NULL,
- Handle<String>::null(),
+ NULL, NULL, NO_CACHED_DATA,
NOT_NATIVES_CODE);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_function, isolate->native_context());
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 9f2436c03..e16e45a57 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -11,6 +11,15 @@
using namespace v8::internal;
+Code* DummyCode(LocalContext* context) {
+ CompileRun("function foo() {};");
+ i::Handle<i::JSFunction> fun = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(
+ (*context)->Global()->Get(v8_str("foo"))));
+ return fun->code();
+}
+
+
TEST(ConstantPool) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -19,32 +28,41 @@ TEST(ConstantPool) {
v8::HandleScope scope(context->GetIsolate());
// Check construction.
- Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(3, 2, 1);
+ Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(3, 1, 2, 1);
CHECK_EQ(array->count_of_int64_entries(), 3);
- CHECK_EQ(array->count_of_ptr_entries(), 2);
+ CHECK_EQ(array->count_of_code_ptr_entries(), 1);
+ CHECK_EQ(array->count_of_heap_ptr_entries(), 2);
CHECK_EQ(array->count_of_int32_entries(), 1);
- CHECK_EQ(array->length(), 6);
+ CHECK_EQ(array->length(), 7);
CHECK_EQ(array->first_int64_index(), 0);
- CHECK_EQ(array->first_ptr_index(), 3);
- CHECK_EQ(array->first_int32_index(), 5);
+ CHECK_EQ(array->first_code_ptr_index(), 3);
+ CHECK_EQ(array->first_heap_ptr_index(), 4);
+ CHECK_EQ(array->first_int32_index(), 6);
// Check getters and setters.
int64_t big_number = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
Handle<Object> object = factory->NewHeapNumber(4.0);
+ Code* code = DummyCode(&context);
array->set(0, big_number);
array->set(1, 0.5);
- array->set(3, *object);
- array->set(5, 50);
+ array->set(2, 3e-24);
+ array->set(3, code->entry());
+ array->set(4, code);
+ array->set(5, *object);
+ array->set(6, 50);
CHECK_EQ(array->get_int64_entry(0), big_number);
CHECK_EQ(array->get_int64_entry_as_double(1), 0.5);
- CHECK_EQ(array->get_ptr_entry(3), *object);
- CHECK_EQ(array->get_int32_entry(5), 50);
+ CHECK_EQ(array->get_int64_entry_as_double(2), 3e-24);
+ CHECK_EQ(array->get_code_ptr_entry(3), code->entry());
+ CHECK_EQ(array->get_heap_ptr_entry(4), code);
+ CHECK_EQ(array->get_heap_ptr_entry(5), *object);
+ CHECK_EQ(array->get_int32_entry(6), 50);
// Check pointers are updated on GC.
- Object* old_ptr = array->get_ptr_entry(3);
+ Object* old_ptr = array->get_heap_ptr_entry(5);
CHECK_EQ(*object, old_ptr);
heap->CollectGarbage(NEW_SPACE);
- Object* new_ptr = array->get_ptr_entry(3);
+ Object* new_ptr = array->get_heap_ptr_entry(5);
CHECK_NE(*object, old_ptr);
CHECK_EQ(*object, new_ptr);
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 3bba51439..ed0b190f9 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -1495,20 +1495,16 @@ TEST(FunctionDetails) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Handle<v8::Script> script_a = v8::Script::Compile(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
+ v8::Handle<v8::Script> script_a = CompileWithOrigin(
" function foo\n() { try { bar(); } catch(e) {} }\n"
- " function bar() { startProfiling(); }\n"),
- v8::String::NewFromUtf8(env->GetIsolate(), "script_a"));
+ " function bar() { startProfiling(); }\n",
+ "script_a");
script_a->Run();
- v8::Handle<v8::Script> script_b = v8::Script::Compile(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
+ v8::Handle<v8::Script> script_b = CompileWithOrigin(
"\n\n function baz() { try { foo(); } catch(e) {} }\n"
"\n\nbaz();\n"
- "stopProfiling();\n"),
- v8::String::NewFromUtf8(env->GetIsolate(), "script_b"));
+ "stopProfiling();\n",
+ "script_b");
script_b->Run();
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 460c07e5a..5190729fa 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -167,3 +167,25 @@ TEST(DaylightSavingsTime) {
CheckDST(august_20 + 2 * 3600 - 1000);
CheckDST(august_20);
}
+
+
+TEST(DateCacheVersion) {
+ FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::Handle<v8::Array> date_cache_version =
+ v8::Handle<v8::Array>::Cast(CompileRun("%DateCacheVersion()"));
+
+ CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
+ CHECK(date_cache_version->Get(0)->IsNumber());
+ CHECK_EQ(0.0, date_cache_version->Get(0)->NumberValue());
+
+ v8::Date::DateTimeConfigurationChangeNotification(isolate);
+
+ CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
+ CHECK(date_cache_version->Get(0)->IsNumber());
+ CHECK_EQ(1.0, date_cache_version->Get(0)->NumberValue());
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 67ef88516..b51cb7724 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -114,7 +114,7 @@ class DebugLocalContext {
v8::internal::Runtime::SetObjectProperty(isolate, global, debug_string,
Handle<Object>(debug->debug_context()->global_proxy(), isolate),
DONT_ENUM,
- ::v8::internal::kNonStrictMode);
+ ::v8::internal::SLOPPY);
}
private:
@@ -581,24 +581,6 @@ const char* frame_script_name_source =
v8::Local<v8::Function> frame_script_name;
-// Source for the JavaScript function which picks out the script data for the
-// top frame.
-const char* frame_script_data_source =
- "function frame_script_data(exec_state) {"
- " return exec_state.frame(0).func().script().data();"
- "}";
-v8::Local<v8::Function> frame_script_data;
-
-
-// Source for the JavaScript function which picks out the script data from
-// AfterCompile event
-const char* compiled_script_data_source =
- "function compiled_script_data(event_data) {"
- " return event_data.script().data();"
- "}";
-v8::Local<v8::Function> compiled_script_data;
-
-
// Source for the JavaScript function which returns the number of frames.
static const char* frame_count_source =
"function frame_count(exec_state) {"
@@ -610,10 +592,8 @@ v8::Handle<v8::Function> frame_count;
// Global variable to store the last function hit - used by some tests.
char last_function_hit[80];
-// Global variable to store the name and data for last script hit - used by some
-// tests.
+// Global variable to store the name for last script hit - used by some tests.
char last_script_name_hit[80];
-char last_script_data_hit[80];
// Global variables to store the last source position - used by some tests.
int last_source_line = -1;
@@ -626,7 +606,6 @@ static void DebugEventBreakPointHitCount(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::Handle<v8::Object> event_data = event_details.GetEventData();
v8::internal::Isolate* isolate = CcTest::i_isolate();
Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
@@ -687,40 +666,11 @@ static void DebugEventBreakPointHitCount(
}
}
- if (!frame_script_data.IsEmpty()) {
- // Get the script data of the function script.
- const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = frame_script_data->Call(exec_state,
- argc, argv);
- if (result->IsUndefined()) {
- last_script_data_hit[0] = '\0';
- } else {
- result = result->ToString();
- CHECK(result->IsString());
- v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteUtf8(last_script_data_hit);
- }
- }
-
// Perform a full deoptimization when the specified number of
// breaks have been hit.
if (break_point_hit_count == break_point_hit_count_deoptimize) {
i::Deoptimizer::DeoptimizeAll(isolate);
}
- } else if (event == v8::AfterCompile && !compiled_script_data.IsEmpty()) {
- const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { event_data };
- v8::Handle<v8::Value> result = compiled_script_data->Call(exec_state,
- argc, argv);
- if (result->IsUndefined()) {
- last_script_data_hit[0] = '\0';
- } else {
- result = result->ToString();
- CHECK(result->IsString());
- v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteUtf8(last_script_data_hit);
- }
}
}
@@ -2268,8 +2218,7 @@ TEST(ScriptBreakPointLineTopLevel) {
v8::Local<v8::Function> f;
{
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(
- script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
+ CompileRunWithOrigin(script, "test.html");
}
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
@@ -2285,8 +2234,7 @@ TEST(ScriptBreakPointLineTopLevel) {
// Recompile and run script and check that break point was hit.
break_point_hit_count = 0;
- v8::Script::Compile(
- script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
+ CompileRunWithOrigin(script, "test.html");
CHECK_EQ(1, break_point_hit_count);
// Call f and check that there are still no break points.
@@ -2321,9 +2269,7 @@ TEST(ScriptBreakPointTopLevelCrash) {
{
v8::HandleScope scope(env->GetIsolate());
break_point_hit_count = 0;
- v8::Script::Compile(script_source,
- v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))
- ->Run();
+ CompileRunWithOrigin(script_source, "test.html");
CHECK_EQ(1, break_point_hit_count);
}
@@ -6249,12 +6195,6 @@ TEST(ScriptNameAndData) {
frame_script_name = CompileFunction(&env,
frame_script_name_source,
"frame_script_name");
- frame_script_data = CompileFunction(&env,
- frame_script_data_source,
- "frame_script_data");
- compiled_script_data = CompileFunction(&env,
- compiled_script_data_source,
- "compiled_script_data");
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
@@ -6267,7 +6207,6 @@ TEST(ScriptNameAndData) {
v8::ScriptOrigin origin1 =
v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "name"));
v8::Handle<v8::Script> script1 = v8::Script::Compile(script, &origin1);
- script1->SetData(v8::String::NewFromUtf8(env->GetIsolate(), "data"));
script1->Run();
v8::Local<v8::Function> f;
f = v8::Local<v8::Function>::Cast(
@@ -6276,7 +6215,6 @@ TEST(ScriptNameAndData) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ("name", last_script_name_hit);
- CHECK_EQ("data", last_script_data_hit);
// Compile the same script again without setting data. As the compilation
// cache is disabled when debugging expect the data to be missing.
@@ -6286,7 +6224,6 @@ TEST(ScriptNameAndData) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ("name", last_script_name_hit);
- CHECK_EQ("", last_script_data_hit); // Undefined results in empty string.
v8::Local<v8::String> data_obj_source = v8::String::NewFromUtf8(
env->GetIsolate(),
@@ -6294,29 +6231,23 @@ TEST(ScriptNameAndData) {
" b: 123,\n"
" toString: function() { return this.a + ' ' + this.b; }\n"
"})\n");
- v8::Local<v8::Value> data_obj = v8::Script::Compile(data_obj_source)->Run();
+ v8::Script::Compile(data_obj_source)->Run();
v8::ScriptOrigin origin2 =
v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "new name"));
v8::Handle<v8::Script> script2 = v8::Script::Compile(script, &origin2);
script2->Run();
- script2->SetData(data_obj->ToString());
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ("new name", last_script_name_hit);
- CHECK_EQ("abc 123", last_script_data_hit);
- v8::Handle<v8::Script> script3 = v8::Script::Compile(
- script, &origin2, NULL,
- v8::String::NewFromUtf8(env->GetIsolate(), "in compile"));
- CHECK_EQ("in compile", last_script_data_hit);
+ v8::Handle<v8::Script> script3 = v8::Script::Compile(script, &origin2);
script3->Run();
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(4, break_point_hit_count);
- CHECK_EQ("in compile", last_script_data_hit);
}
@@ -7052,7 +6983,7 @@ TEST(Backtrace) {
v8::Handle<v8::String> void0 =
v8::String::NewFromUtf8(env->GetIsolate(), "void(0)");
- v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
+ v8::Handle<v8::Script> script = CompileWithOrigin(void0, void0);
// Check backtrace from "void(0)" script.
BacktraceData::frame_counter = -10;
@@ -7072,18 +7003,20 @@ TEST(Backtrace) {
TEST(GetMirror) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Handle<v8::Value> obj =
- v8::Debug::GetMirror(v8::String::NewFromUtf8(env->GetIsolate(), "hodja"));
- v8::Handle<v8::Function> run_test =
- v8::Handle<v8::Function>::Cast(v8::Script::New(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function runTest(mirror) {"
- " return mirror.isString() && (mirror.length() == 5);"
- "}"
- ""
- "runTest;"))->Run());
+ v8::Debug::GetMirror(v8::String::NewFromUtf8(isolate, "hodja"));
+ v8::ScriptCompiler::Source source(v8_str(
+ "function runTest(mirror) {"
+ " return mirror.isString() && (mirror.length() == 5);"
+ "}"
+ ""
+ "runTest;"));
+ v8::Handle<v8::Function> run_test = v8::Handle<v8::Function>::Cast(
+ v8::ScriptCompiler::CompileUnbound(isolate, &source)
+ ->BindToCurrentContext()
+ ->Run());
v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj);
CHECK(result->IsTrue());
}
@@ -7700,4 +7633,39 @@ TEST(LiveEditDisabled) {
}
+TEST(PrecompiledFunction) {
+ // Regression test for crbug.com/346207. If we have preparse data, parsing the
+ // function in the presence of the debugger (and breakpoints) should still
+ // succeed. The bug was that preparsing was done lazily and parsing was done
+ // eagerly, so, the symbol streams didn't match.
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ env.ExposeDebug();
+ v8::Debug::SetDebugEventListener2(DebugBreakInlineListener);
+
+ v8::Local<v8::Function> break_here =
+ CompileFunction(&env, "function break_here(){}", "break_here");
+ SetBreakPoint(break_here, 0);
+
+ const char* source =
+ "var a = b = c = 1; \n"
+ "function this_is_lazy() { \n"
+ // This symbol won't appear in the preparse data.
+ " var a; \n"
+ "} \n"
+ "function bar() { \n"
+ " return \"bar\"; \n"
+ "}; \n"
+ "a = b = c = 2; \n"
+ "bar(); \n";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("bar", *utf8);
+
+ v8::Debug::SetDebugEventListener2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 1f22c9ff3..d6738a31a 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -557,7 +557,6 @@ class ExistsInPrototypeContext: public DeclarationContext {
TEST(ExistsInPrototype) {
- i::FLAG_es52_globals = true;
HandleScope scope(CcTest::isolate());
// Sanity check to make sure that the holder of the interceptor
@@ -620,7 +619,6 @@ class AbsentInPrototypeContext: public DeclarationContext {
TEST(AbsentInPrototype) {
- i::FLAG_es52_globals = true;
v8::V8::Initialize();
HandleScope scope(CcTest::isolate());
@@ -668,7 +666,6 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
TEST(ExistsInHiddenPrototype) {
- i::FLAG_es52_globals = true;
HandleScope scope(CcTest::isolate());
{ ExistsInHiddenPrototypeContext context;
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 4b69612f5..dbbb3edb0 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -613,7 +613,6 @@ TEST(DeoptimizeLoadICStoreIC) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -695,5 +694,4 @@ TEST(DeoptimizeLoadICStoreICNested) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index cb1b1c798..5eff4206c 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -272,10 +272,10 @@ TEST(Type0) {
// We only disassemble one instruction so the eor instruction is not here.
COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
"1301c234 movwne ip, #4660");
- // Movw can't do setcc so we don't get that here. Mov immediate with setcc
- // is pretty strange anyway.
+ // Movw can't do setcc, so first move to ip, then the following instruction
+ // moves to r5. Mov immediate with setcc is pretty strange anyway.
COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
- "159fc000 ldrne ip, [pc, #+0]");
+ "1301c234 movwne ip, #4660");
// Emit a literal pool now, otherwise this could be dumped later, in the
// middle of a different test.
EMIT_PENDING_LITERALS();
@@ -410,6 +410,8 @@ TEST(Type3) {
"e6843895 pkhbt r3, r4, r5, lsl #17");
COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
"e68438d5 pkhtb r3, r4, r5, asr #17");
+ COMPARE(uxtb(r9, Operand(r10, ROR, 0)),
+ "e6ef907a uxtb r9, r10");
COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
"e6ef3474 uxtb r3, r4, ror #8");
COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
@@ -687,8 +689,10 @@ TEST(Neon) {
"f421420f vld1.8 {d4, d5, d6, d7}, [r1]");
COMPARE(vst1(Neon16, NeonListOperand(d17, 4), NeonMemOperand(r9)),
"f449124f vst1.16 {d17, d18, d19, d20}, [r9]");
+ COMPARE(vmovl(NeonU8, q3, d1),
+ "f3886a11 vmovl.u8 q3, d1");
COMPARE(vmovl(NeonU8, q4, d2),
- "f3884a12 vmovl.u8 q4, d2");
+ "f3888a12 vmovl.u8 q4, d2");
}
VERIFY_RUN();
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
new file mode 100644
index 000000000..3343175e9
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -0,0 +1,1763 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <cstring>
+#include "cctest.h"
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
+
+using namespace v8::internal;
+
+#define TEST_(name) TEST(DISASM_##name)
+
+#define EXP_SIZE (256)
+#define INSTR_SIZE (1024)
+#define SET_UP_CLASS(ASMCLASS) \
+ InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ ASMCLASS* assm = new ASMCLASS(isolate, buf, INSTR_SIZE); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Disassembler* disasm = new Disassembler(); \
+ decoder->AppendVisitor(disasm)
+
+#define SET_UP() SET_UP_CLASS(Assembler)
+
+#define COMPARE(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strcmp(disasm->GetOutput(), EXP) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define COMPARE_PREFIX(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define CLEANUP() \
+ delete disasm; \
+ delete decoder; \
+ delete assm
+
+
+static bool vm_initialized = false;
+
+
+static void InitializeVM() {
+ if (!vm_initialized) {
+ CcTest::InitializeVM();
+ vm_initialized = true;
+ }
+}
+
+
+TEST_(bootstrap) {
+ SET_UP();
+
+ // Instructions generated by C compiler, disassembled by objdump, and
+ // reformatted to suit our disassembly style.
+ COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
+ COMPARE(dci(0x910003fd), "mov fp, csp");
+ COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
+ COMPARE(dci(0xb900001f), "str wzr, [x0]");
+ COMPARE(dci(0x528000e1), "movz w1, #0x7");
+ COMPARE(dci(0xb9001c01), "str w1, [x0, #28]");
+ COMPARE(dci(0x390043a0), "strb w0, [fp, #16]");
+ COMPARE(dci(0x790027a0), "strh w0, [fp, #18]");
+ COMPARE(dci(0xb9400400), "ldr w0, [x0, #4]");
+ COMPARE(dci(0x0b000021), "add w1, w1, w0");
+ COMPARE(dci(0x531b6800), "lsl w0, w0, #5");
+ COMPARE(dci(0x521e0400), "eor w0, w0, #0xc");
+ COMPARE(dci(0x72af0f00), "movk w0, #0x7878, lsl #16");
+ COMPARE(dci(0xd360fc00), "lsr x0, x0, #32");
+ COMPARE(dci(0x13037c01), "asr w1, w0, #3");
+ COMPARE(dci(0x4b000021), "sub w1, w1, w0");
+ COMPARE(dci(0x2a0103e0), "mov w0, w1");
+ COMPARE(dci(0x93407c00), "sxtw x0, w0");
+ COMPARE(dci(0x2a000020), "orr w0, w1, w0");
+ COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
+
+ CLEANUP();
+}
+
+
+TEST_(mov_mvn) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Mov(w0, Operand(0x1234)), "movz w0, #0x1234");
+ COMPARE(Mov(x1, Operand(0x1234)), "movz x1, #0x1234");
+ COMPARE(Mov(w2, Operand(w3)), "mov w2, w3");
+ COMPARE(Mov(x4, Operand(x5)), "mov x4, x5");
+ COMPARE(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
+ COMPARE(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
+ COMPARE(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
+ COMPARE(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
+ COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
+ COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
+
+ COMPARE(Mov(x0, csp), "mov x0, csp");
+ COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(Mov(x0, xzr), "mov x0, xzr");
+ COMPARE(Mov(w0, wzr), "mov w0, wzr");
+ COMPARE(mov(x0, csp), "mov x0, csp");
+ COMPARE(mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(mov(x0, xzr), "mov x0, xzr");
+ COMPARE(mov(w0, wzr), "mov w0, wzr");
+
+ COMPARE(Mvn(w0, Operand(0x1)), "movn w0, #0x1");
+ COMPARE(Mvn(x1, Operand(0xfff)), "movn x1, #0xfff");
+ COMPARE(Mvn(w2, Operand(w3)), "mvn w2, w3");
+ COMPARE(Mvn(x4, Operand(x5)), "mvn x4, x5");
+ COMPARE(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
+ COMPARE(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
+
+ CLEANUP();
+}
+
+
+TEST_(move_immediate) {
+ SET_UP();
+
+ COMPARE(movz(w0, 0x1234), "movz w0, #0x1234");
+ COMPARE(movz(x1, 0xabcd0000), "movz x1, #0xabcd0000");
+ COMPARE(movz(x2, 0x555500000000), "movz x2, #0x555500000000");
+ COMPARE(movz(x3, 0xaaaa000000000000), "movz x3, #0xaaaa000000000000");
+ COMPARE(movz(x4, 0xabcd, 16), "movz x4, #0xabcd0000");
+ COMPARE(movz(x5, 0x5555, 32), "movz x5, #0x555500000000");
+ COMPARE(movz(x6, 0xaaaa, 48), "movz x6, #0xaaaa000000000000");
+
+ COMPARE(movk(w7, 0x1234), "movk w7, #0x1234");
+ COMPARE(movk(x8, 0xabcd0000), "movk x8, #0xabcd, lsl #16");
+ COMPARE(movk(x9, 0x555500000000), "movk x9, #0x5555, lsl #32");
+ COMPARE(movk(x10, 0xaaaa000000000000), "movk x10, #0xaaaa, lsl #48");
+ COMPARE(movk(w11, 0xabcd, 16), "movk w11, #0xabcd, lsl #16");
+ COMPARE(movk(x12, 0x5555, 32), "movk x12, #0x5555, lsl #32");
+ COMPARE(movk(x13, 0xaaaa, 48), "movk x13, #0xaaaa, lsl #48");
+
+ COMPARE(movn(w14, 0x1234), "movn w14, #0x1234");
+ COMPARE(movn(x15, 0xabcd0000), "movn x15, #0xabcd0000");
+ COMPARE(movn(x16, 0x555500000000), "movn x16, #0x555500000000");
+ COMPARE(movn(x17, 0xaaaa000000000000), "movn x17, #0xaaaa000000000000");
+ COMPARE(movn(w18, 0xabcd, 16), "movn w18, #0xabcd0000");
+ COMPARE(movn(x19, 0x5555, 32), "movn x19, #0x555500000000");
+ COMPARE(movn(x20, 0xaaaa, 48), "movn x20, #0xaaaa000000000000");
+
+ COMPARE(movk(w21, 0), "movk w21, #0x0");
+ COMPARE(movk(x22, 0, 0), "movk x22, #0x0");
+ COMPARE(movk(w23, 0, 16), "movk w23, #0x0, lsl #16");
+ COMPARE(movk(x24, 0, 32), "movk x24, #0x0, lsl #32");
+ COMPARE(movk(x25, 0, 48), "movk x25, #0x0, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST(move_immediate_2) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // Move instructions expected for certain immediates. This is really a macro
+ // assembler test, to ensure it generates immediates efficiently.
+ COMPARE(Mov(w0, 0), "movz w0, #0x0");
+ COMPARE(Mov(w0, 0x0000ffff), "movz w0, #0xffff");
+ COMPARE(Mov(w0, 0x00010000), "movz w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff0000), "movz w0, #0xffff0000");
+ COMPARE(Mov(w0, 0x0001ffff), "movn w0, #0xfffe0000");
+ COMPARE(Mov(w0, 0xffff8000), "movn w0, #0x7fff");
+ COMPARE(Mov(w0, 0xfffffffe), "movn w0, #0x1");
+ COMPARE(Mov(w0, 0xffffffff), "movn w0, #0x0");
+ COMPARE(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
+ COMPARE(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
+ COMPARE(Mov(w0, 0xfffeffff), "movn w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff7fff), "movn w0, #0x8000");
+
+ COMPARE(Mov(x0, 0), "movz x0, #0x0");
+ COMPARE(Mov(x0, 0x0000ffff), "movz x0, #0xffff");
+ COMPARE(Mov(x0, 0x00010000), "movz x0, #0x10000");
+ COMPARE(Mov(x0, 0xffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
+ COMPARE(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
+ COMPARE(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
+ COMPARE(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
+ COMPARE(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
+ COMPARE(Mov(x0, 0xffff000000000000), "movz x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0x0000ffff00000000), "movz x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x00000000ffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffffffffffff0000), "movn x0, #0xffff");
+ COMPARE(Mov(x0, 0xffffffff0000ffff), "movn x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffff0000ffffffff), "movn x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x0000ffffffffffff), "movn x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
+ COMPARE(Mov(x0, 0xfffeffffffffffff), "movn x0, #0x1000000000000");
+ COMPARE(Mov(x0, 0xffff7fffffffffff), "movn x0, #0x800000000000");
+ COMPARE(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
+ COMPARE(Mov(x0, 0xfffffffeffffffff), "movn x0, #0x100000000");
+ COMPARE(Mov(x0, 0xffffffff7fffffff), "movn x0, #0x80000000");
+ COMPARE(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
+ COMPARE(Mov(x0, 0xfffffffffffeffff), "movn x0, #0x10000");
+ COMPARE(Mov(x0, 0xffffffffffff7fff), "movn x0, #0x8000");
+ COMPARE(Mov(x0, 0xffffffffffffffff), "movn x0, #0x0");
+
+ COMPARE(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
+ COMPARE(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
+ COMPARE(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
+ COMPARE(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
+ COMPARE(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
+ COMPARE(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST_(add_immediate) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(0xff)), "add w0, w1, #0xff (255)");
+ COMPARE(add(x2, x3, Operand(0x3ff)), "add x2, x3, #0x3ff (1023)");
+ COMPARE(add(w4, w5, Operand(0xfff)), "add w4, w5, #0xfff (4095)");
+ COMPARE(add(x6, x7, Operand(0x1000)), "add x6, x7, #0x1000 (4096)");
+ COMPARE(add(w8, w9, Operand(0xff000)), "add w8, w9, #0xff000 (1044480)");
+ COMPARE(add(x10, x11, Operand(0x3ff000)),
+ "add x10, x11, #0x3ff000 (4190208)");
+ COMPARE(add(w12, w13, Operand(0xfff000)),
+ "add w12, w13, #0xfff000 (16773120)");
+ COMPARE(adds(w14, w15, Operand(0xff)), "adds w14, w15, #0xff (255)");
+ COMPARE(adds(x16, x17, Operand(0xaa000)),
+ "adds x16, x17, #0xaa000 (696320)");
+ COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
+ COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
+ COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
+ COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_immediate) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(0xff)), "sub w0, w1, #0xff (255)");
+ COMPARE(sub(x2, x3, Operand(0x3ff)), "sub x2, x3, #0x3ff (1023)");
+ COMPARE(sub(w4, w5, Operand(0xfff)), "sub w4, w5, #0xfff (4095)");
+ COMPARE(sub(x6, x7, Operand(0x1000)), "sub x6, x7, #0x1000 (4096)");
+ COMPARE(sub(w8, w9, Operand(0xff000)), "sub w8, w9, #0xff000 (1044480)");
+ COMPARE(sub(x10, x11, Operand(0x3ff000)),
+ "sub x10, x11, #0x3ff000 (4190208)");
+ COMPARE(sub(w12, w13, Operand(0xfff000)),
+ "sub w12, w13, #0xfff000 (16773120)");
+ COMPARE(subs(w14, w15, Operand(0xff)), "subs w14, w15, #0xff (255)");
+ COMPARE(subs(x16, x17, Operand(0xaa000)),
+ "subs x16, x17, #0xaa000 (696320)");
+ COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
+ COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(add_shifted) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2)), "add w0, w1, w2");
+ COMPARE(add(x3, x4, Operand(x5)), "add x3, x4, x5");
+ COMPARE(add(w6, w7, Operand(w8, LSL, 1)), "add w6, w7, w8, lsl #1");
+ COMPARE(add(x9, x10, Operand(x11, LSL, 2)), "add x9, x10, x11, lsl #2");
+ COMPARE(add(w12, w13, Operand(w14, LSR, 3)), "add w12, w13, w14, lsr #3");
+ COMPARE(add(x15, x16, Operand(x17, LSR, 4)), "add x15, x16, x17, lsr #4");
+ COMPARE(add(w18, w19, Operand(w20, ASR, 5)), "add w18, w19, w20, asr #5");
+ COMPARE(add(x21, x22, Operand(x23, ASR, 6)), "add x21, x22, x23, asr #6");
+ COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
+ COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
+
+ COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
+ COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
+ COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
+ COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
+ COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
+ COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
+ COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_shifted) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2)), "sub w0, w1, w2");
+ COMPARE(sub(x3, x4, Operand(x5)), "sub x3, x4, x5");
+ COMPARE(sub(w6, w7, Operand(w8, LSL, 1)), "sub w6, w7, w8, lsl #1");
+ COMPARE(sub(x9, x10, Operand(x11, LSL, 2)), "sub x9, x10, x11, lsl #2");
+ COMPARE(sub(w12, w13, Operand(w14, LSR, 3)), "sub w12, w13, w14, lsr #3");
+ COMPARE(sub(x15, x16, Operand(x17, LSR, 4)), "sub x15, x16, x17, lsr #4");
+ COMPARE(sub(w18, w19, Operand(w20, ASR, 5)), "sub w18, w19, w20, asr #5");
+ COMPARE(sub(x21, x22, Operand(x23, ASR, 6)), "sub x21, x22, x23, asr #6");
+ COMPARE(cmp(w24, Operand(w25)), "cmp w24, w25");
+ COMPARE(cmp(x26, Operand(cp, LSL, 63)), "cmp x26, cp, lsl #63");
+ COMPARE(neg(w28, Operand(w29)), "neg w28, w29");
+ COMPARE(neg(lr, Operand(x0, LSR, 62)), "neg lr, x0, lsr #62");
+ COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
+ COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
+
+ COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
+ COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
+ COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
+ COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
+ COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
+ COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
+ COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(add_extended) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2, UXTB)), "add w0, w1, w2, uxtb");
+ COMPARE(adds(x3, x4, Operand(w5, UXTB, 1)), "adds x3, x4, w5, uxtb #1");
+ COMPARE(add(w6, w7, Operand(w8, UXTH, 2)), "add w6, w7, w8, uxth #2");
+ COMPARE(adds(x9, x10, Operand(x11, UXTW, 3)), "adds x9, x10, w11, uxtw #3");
+ COMPARE(add(x12, x13, Operand(x14, UXTX, 4)), "add x12, x13, x14, uxtx #4");
+ COMPARE(adds(w15, w16, Operand(w17, SXTB, 4)), "adds w15, w16, w17, sxtb #4");
+ COMPARE(add(x18, x19, Operand(x20, SXTB, 3)), "add x18, x19, w20, sxtb #3");
+ COMPARE(adds(w21, w22, Operand(w23, SXTH, 2)), "adds w21, w22, w23, sxth #2");
+ COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");
+ COMPARE(adds(cp, jssp, Operand(fp, SXTX)), "adds cp, jssp, fp, sxtx");
+ COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
+ COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
+
+ COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
+ COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
+ COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
+ COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_extended) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2, UXTB)), "sub w0, w1, w2, uxtb");
+ COMPARE(subs(x3, x4, Operand(w5, UXTB, 1)), "subs x3, x4, w5, uxtb #1");
+ COMPARE(sub(w6, w7, Operand(w8, UXTH, 2)), "sub w6, w7, w8, uxth #2");
+ COMPARE(subs(x9, x10, Operand(x11, UXTW, 3)), "subs x9, x10, w11, uxtw #3");
+ COMPARE(sub(x12, x13, Operand(x14, UXTX, 4)), "sub x12, x13, x14, uxtx #4");
+ COMPARE(subs(w15, w16, Operand(w17, SXTB, 4)), "subs w15, w16, w17, sxtb #4");
+ COMPARE(sub(x18, x19, Operand(x20, SXTB, 3)), "sub x18, x19, w20, sxtb #3");
+ COMPARE(subs(w21, w22, Operand(w23, SXTH, 2)), "subs w21, w22, w23, sxth #2");
+ COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");
+ COMPARE(subs(cp, jssp, Operand(fp, SXTX)), "subs cp, jssp, fp, sxtx");
+ COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
+ COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
+
+ COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
+ COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
+ COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
+ COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(adc_subc_ngc) {
+ SET_UP();
+
+ COMPARE(adc(w0, w1, Operand(w2)), "adc w0, w1, w2");
+ COMPARE(adc(x3, x4, Operand(x5)), "adc x3, x4, x5");
+ COMPARE(adcs(w6, w7, Operand(w8)), "adcs w6, w7, w8");
+ COMPARE(adcs(x9, x10, Operand(x11)), "adcs x9, x10, x11");
+ COMPARE(sbc(w12, w13, Operand(w14)), "sbc w12, w13, w14");
+ COMPARE(sbc(x15, x16, Operand(x17)), "sbc x15, x16, x17");
+ COMPARE(sbcs(w18, w19, Operand(w20)), "sbcs w18, w19, w20");
+ COMPARE(sbcs(x21, x22, Operand(x23)), "sbcs x21, x22, x23");
+ COMPARE(ngc(w24, Operand(w25)), "ngc w24, w25");
+ COMPARE(ngc(x26, Operand(cp)), "ngc x26, cp");
+ COMPARE(ngcs(w28, Operand(w29)), "ngcs w28, w29");
+ COMPARE(ngcs(lr, Operand(x0)), "ngcs lr, x0");
+
+ CLEANUP();
+}
+
+
+TEST_(mul_and_div) {
+ SET_UP();
+
+ COMPARE(mul(w0, w1, w2), "mul w0, w1, w2");
+ COMPARE(mul(x3, x4, x5), "mul x3, x4, x5");
+ COMPARE(mul(w30, w0, w1), "mul w30, w0, w1");
+ COMPARE(mul(lr, x0, x1), "mul lr, x0, x1");
+ COMPARE(mneg(w0, w1, w2), "mneg w0, w1, w2");
+ COMPARE(mneg(x3, x4, x5), "mneg x3, x4, x5");
+ COMPARE(mneg(w30, w0, w1), "mneg w30, w0, w1");
+ COMPARE(mneg(lr, x0, x1), "mneg lr, x0, x1");
+ COMPARE(smull(x0, w0, w1), "smull x0, w0, w1");
+ COMPARE(smull(lr, w30, w0), "smull lr, w30, w0");
+ COMPARE(smulh(x0, x1, x2), "smulh x0, x1, x2");
+
+ COMPARE(madd(w0, w1, w2, w3), "madd w0, w1, w2, w3");
+ COMPARE(madd(x4, x5, x6, x7), "madd x4, x5, x6, x7");
+ COMPARE(madd(w8, w9, w10, wzr), "mul w8, w9, w10");
+ COMPARE(madd(x11, x12, x13, xzr), "mul x11, x12, x13");
+ COMPARE(msub(w14, w15, w16, w17), "msub w14, w15, w16, w17");
+ COMPARE(msub(x18, x19, x20, x21), "msub x18, x19, x20, x21");
+ COMPARE(msub(w22, w23, w24, wzr), "mneg w22, w23, w24");
+ COMPARE(msub(x25, x26, x0, xzr), "mneg x25, x26, x0");
+
+ COMPARE(sdiv(w0, w1, w2), "sdiv w0, w1, w2");
+ COMPARE(sdiv(x3, x4, x5), "sdiv x3, x4, x5");
+ COMPARE(udiv(w6, w7, w8), "udiv w6, w7, w8");
+ COMPARE(udiv(x9, x10, x11), "udiv x9, x10, x11");
+
+ CLEANUP();
+}
+
+
+TEST(maddl_msubl) {
+ SET_UP();
+
+ COMPARE(smaddl(x0, w1, w2, x3), "smaddl x0, w1, w2, x3");
+ COMPARE(smaddl(x25, w21, w22, x16), "smaddl x25, w21, w22, x16");
+ COMPARE(umaddl(x0, w1, w2, x3), "umaddl x0, w1, w2, x3");
+ COMPARE(umaddl(x25, w21, w22, x16), "umaddl x25, w21, w22, x16");
+
+ COMPARE(smsubl(x0, w1, w2, x3), "smsubl x0, w1, w2, x3");
+ COMPARE(smsubl(x25, w21, w22, x16), "smsubl x25, w21, w22, x16");
+ COMPARE(umsubl(x0, w1, w2, x3), "umsubl x0, w1, w2, x3");
+ COMPARE(umsubl(x25, w21, w22, x16), "umsubl x25, w21, w22, x16");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_1_source) {
+ SET_UP();
+
+ COMPARE(rbit(w0, w1), "rbit w0, w1");
+ COMPARE(rbit(x2, x3), "rbit x2, x3");
+ COMPARE(rev16(w4, w5), "rev16 w4, w5");
+ COMPARE(rev16(x6, x7), "rev16 x6, x7");
+ COMPARE(rev32(x8, x9), "rev32 x8, x9");
+ COMPARE(rev(w10, w11), "rev w10, w11");
+ COMPARE(rev(x12, x13), "rev x12, x13");
+ COMPARE(clz(w14, w15), "clz w14, w15");
+ COMPARE(clz(x16, x17), "clz x16, x17");
+ COMPARE(cls(w18, w19), "cls w18, w19");
+ COMPARE(cls(x20, x21), "cls x20, x21");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of the bitfield instruction family: the extension
+// aliases (sxtb/sxth/sxtw, uxtb/uxth), the immediate-shift aliases
+// (asr/lsr/lsl), and the explicit insert/extract forms (sbfiz, sbfx, bfi,
+// bfxil, ubfiz, ubfx). Note the expected strings show that x-destination
+// extensions print a w-form source register.
+TEST_(bitfield) {
+ SET_UP();
+
+ COMPARE(sxtb(w0, w1), "sxtb w0, w1");
+ COMPARE(sxtb(x2, x3), "sxtb x2, w3");
+ COMPARE(sxth(w4, w5), "sxth w4, w5");
+ COMPARE(sxth(x6, x7), "sxth x6, w7");
+ COMPARE(sxtw(x8, x9), "sxtw x8, w9");
+ COMPARE(sxtb(x0, w1), "sxtb x0, w1");
+ COMPARE(sxth(x2, w3), "sxth x2, w3");
+ COMPARE(sxtw(x4, w5), "sxtw x4, w5");
+
+ COMPARE(uxtb(w10, w11), "uxtb w10, w11");
+ COMPARE(uxtb(x12, x13), "uxtb x12, w13");
+ COMPARE(uxth(w14, w15), "uxth w14, w15");
+ COMPARE(uxth(x16, x17), "uxth x16, w17");
+ // uxtw with an x destination has no alias; it prints as plain ubfx.
+ COMPARE(uxtw(x18, x19), "ubfx x18, x19, #0, #32");
+
+ COMPARE(asr(w20, w21, 10), "asr w20, w21, #10");
+ COMPARE(asr(x22, x23, 20), "asr x22, x23, #20");
+ COMPARE(lsr(w24, w25, 10), "lsr w24, w25, #10");
+ COMPARE(lsr(x26, cp, 20), "lsr x26, cp, #20");
+ COMPARE(lsl(w28, w29, 10), "lsl w28, w29, #10");
+ COMPARE(lsl(lr, x0, 20), "lsl lr, x0, #20");
+
+ COMPARE(sbfiz(w1, w2, 1, 20), "sbfiz w1, w2, #1, #20");
+ COMPARE(sbfiz(x3, x4, 2, 19), "sbfiz x3, x4, #2, #19");
+ COMPARE(sbfx(w5, w6, 3, 18), "sbfx w5, w6, #3, #18");
+ COMPARE(sbfx(x7, x8, 4, 17), "sbfx x7, x8, #4, #17");
+ COMPARE(bfi(w9, w10, 5, 16), "bfi w9, w10, #5, #16");
+ COMPARE(bfi(x11, x12, 6, 15), "bfi x11, x12, #6, #15");
+ COMPARE(bfxil(w13, w14, 7, 14), "bfxil w13, w14, #7, #14");
+ COMPARE(bfxil(x15, x16, 8, 13), "bfxil x15, x16, #8, #13");
+ COMPARE(ubfiz(w17, w18, 9, 12), "ubfiz w17, w18, #9, #12");
+ COMPARE(ubfiz(x19, x20, 10, 11), "ubfiz x19, x20, #10, #11");
+ COMPARE(ubfx(w21, w22, 11, 10), "ubfx w21, w22, #11, #10");
+ COMPARE(ubfx(x23, x24, 12, 9), "ubfx x23, x24, #12, #9");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of extr, including the boundary immediates (#0, #31,
+// #63) and the ror alias that is printed when both source registers are
+// the same.
+TEST_(extract) {
+ SET_UP();
+
+ COMPARE(extr(w0, w1, w2, 0), "extr w0, w1, w2, #0");
+ COMPARE(extr(x3, x4, x5, 1), "extr x3, x4, x5, #1");
+ COMPARE(extr(w6, w7, w8, 31), "extr w6, w7, w8, #31");
+ COMPARE(extr(x9, x10, x11, 63), "extr x9, x10, x11, #63");
+ COMPARE(extr(w12, w13, w13, 10), "ror w12, w13, #10");
+ COMPARE(extr(x14, x15, x15, 42), "ror x14, x15, #42");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of logical instructions with bitmask immediates.
+// The loops sweep repeating bit patterns (64/32/16/8/4-bit elements)
+// through every rotation; the remaining cases cover 2-bit patterns,
+// 32-bit destinations, tst/ands, the inverse-operand instructions
+// (bic/orn/eon/bics, which print as and/orr/eor/ands with the inverted
+// immediate), stack-pointer operands, and the mov alias of orr.
+TEST_(logical_immediate) {
+ SET_UP();
+ #define RESULT_SIZE (256)
+
+ char result[RESULT_SIZE];
+
+ // Test immediate encoding - 64-bit destination.
+ // 64-bit patterns.
+ uint64_t value = 0x7fffffff;
+ for (int i = 0; i < 64; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 32-bit patterns.
+ value = 0x00003fff00003fffL;
+ for (int i = 0; i < 32; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 16-bit patterns.
+ value = 0x001f001f001f001fL;
+ for (int i = 0; i < 16; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 8-bit patterns.
+ value = 0x0e0e0e0e0e0e0e0eL;
+ for (int i = 0; i < 8; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 4-bit patterns.
+ value = 0x6666666666666666L;
+ for (int i = 0; i < 4; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 2-bit patterns.
+ COMPARE(and_(x0, x0, Operand(0x5555555555555555L)),
+ "and x0, x0, #0x5555555555555555");
+ COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaaL)),
+ "and x0, x0, #0xaaaaaaaaaaaaaaaa");
+
+ // Test immediate encoding - 32-bit destination.
+ COMPARE(and_(w0, w0, Operand(0xff8007ff)),
+ "and w0, w0, #0xff8007ff"); // 32-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0xf87ff87f)),
+ "and w0, w0, #0xf87ff87f"); // 16-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x87878787)),
+ "and w0, w0, #0x87878787"); // 8-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x66666666)),
+ "and w0, w0, #0x66666666"); // 4-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x55555555)),
+ "and w0, w0, #0x55555555"); // 2-bit pattern.
+
+ // Test other instructions.
+ COMPARE(tst(w1, Operand(0x11111111)),
+ "tst w1, #0x11111111");
+ COMPARE(tst(x2, Operand(0x8888888888888888L)),
+ "tst x2, #0x8888888888888888");
+ COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
+ "orr w7, w8, #0xaaaaaaaa");
+ COMPARE(orr(x9, x10, Operand(0x5555555555555555L)),
+ "orr x9, x10, #0x5555555555555555");
+ COMPARE(eor(w15, w16, Operand(0x00000001)),
+ "eor w15, w16, #0x1");
+ COMPARE(eor(x17, x18, Operand(0x0000000000000003L)),
+ "eor x17, x18, #0x3");
+ COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
+ COMPARE(ands(x25, x26, Operand(0x800000000000000fL)),
+ "ands x25, x26, #0x800000000000000f");
+
+ // Test inverse.
+ COMPARE(bic(w3, w4, Operand(0x20202020)),
+ "and w3, w4, #0xdfdfdfdf");
+ COMPARE(bic(x5, x6, Operand(0x4040404040404040L)),
+ "and x5, x6, #0xbfbfbfbfbfbfbfbf");
+ COMPARE(orn(w11, w12, Operand(0x40004000)),
+ "orr w11, w12, #0xbfffbfff");
+ COMPARE(orn(x13, x14, Operand(0x8181818181818181L)),
+ "orr x13, x14, #0x7e7e7e7e7e7e7e7e");
+ COMPARE(eon(w19, w20, Operand(0x80000001)),
+ "eor w19, w20, #0x7ffffffe");
+ COMPARE(eon(x21, x22, Operand(0xc000000000000003L)),
+ "eor x21, x22, #0x3ffffffffffffffc");
+ COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
+ COMPARE(bics(fp, x0, Operand(0xfffffffeffffffffL)),
+ "ands fp, x0, #0x100000000");
+
+ // Test stack pointer.
+ COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
+ COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
+ COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
+ COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
+
+ // Test move aliases.
+ COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
+ COMPARE(orr(w1, wzr, Operand(0x00007800)), "orr w1, wzr, #0x7800");
+ COMPARE(orr(w2, wzr, Operand(0x00078000)), "mov w2, #0x78000");
+ COMPARE(orr(w3, wzr, Operand(0x00780000)), "orr w3, wzr, #0x780000");
+ COMPARE(orr(w4, wzr, Operand(0x07800000)), "orr w4, wzr, #0x7800000");
+ COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001UL)),
+ "orr x5, xzr, #0xffffffffffffc001");
+ COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001fUL)),
+ "mov x6, #0xfffffffffffc001f");
+ COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ffUL)),
+ "mov x7, #0xffffffffffc001ff");
+ COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fffUL)),
+ "mov x8, #0xfffffffffc001fff");
+ COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffffUL)),
+ "orr x9, xzr, #0xffffffffc001ffff");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of logical instructions with shifted-register
+// operands (and/bic/orr/orn/eor/eon/ands/bics with lsl/lsr/asr/ror), plus
+// the tst, mvn and mov aliases that apply when the destination or first
+// source is a zero register.
+TEST_(logical_shifted) {
+ SET_UP();
+
+ COMPARE(and_(w0, w1, Operand(w2)), "and w0, w1, w2");
+ COMPARE(and_(x3, x4, Operand(x5, LSL, 1)), "and x3, x4, x5, lsl #1");
+ COMPARE(and_(w6, w7, Operand(w8, LSR, 2)), "and w6, w7, w8, lsr #2");
+ COMPARE(and_(x9, x10, Operand(x11, ASR, 3)), "and x9, x10, x11, asr #3");
+ COMPARE(and_(w12, w13, Operand(w14, ROR, 4)), "and w12, w13, w14, ror #4");
+
+ COMPARE(bic(w15, w16, Operand(w17)), "bic w15, w16, w17");
+ COMPARE(bic(x18, x19, Operand(x20, LSL, 5)), "bic x18, x19, x20, lsl #5");
+ COMPARE(bic(w21, w22, Operand(w23, LSR, 6)), "bic w21, w22, w23, lsr #6");
+ COMPARE(bic(x24, x25, Operand(x26, ASR, 7)), "bic x24, x25, x26, asr #7");
+ COMPARE(bic(w27, w28, Operand(w29, ROR, 8)), "bic w27, w28, w29, ror #8");
+
+ COMPARE(orr(w0, w1, Operand(w2)), "orr w0, w1, w2");
+ COMPARE(orr(x3, x4, Operand(x5, LSL, 9)), "orr x3, x4, x5, lsl #9");
+ COMPARE(orr(w6, w7, Operand(w8, LSR, 10)), "orr w6, w7, w8, lsr #10");
+ COMPARE(orr(x9, x10, Operand(x11, ASR, 11)), "orr x9, x10, x11, asr #11");
+ COMPARE(orr(w12, w13, Operand(w14, ROR, 12)), "orr w12, w13, w14, ror #12");
+
+ COMPARE(orn(w15, w16, Operand(w17)), "orn w15, w16, w17");
+ COMPARE(orn(x18, x19, Operand(x20, LSL, 13)), "orn x18, x19, x20, lsl #13");
+ COMPARE(orn(w21, w22, Operand(w23, LSR, 14)), "orn w21, w22, w23, lsr #14");
+ COMPARE(orn(x24, x25, Operand(x26, ASR, 15)), "orn x24, x25, x26, asr #15");
+ COMPARE(orn(w27, w28, Operand(w29, ROR, 16)), "orn w27, w28, w29, ror #16");
+
+ COMPARE(eor(w0, w1, Operand(w2)), "eor w0, w1, w2");
+ COMPARE(eor(x3, x4, Operand(x5, LSL, 17)), "eor x3, x4, x5, lsl #17");
+ COMPARE(eor(w6, w7, Operand(w8, LSR, 18)), "eor w6, w7, w8, lsr #18");
+ COMPARE(eor(x9, x10, Operand(x11, ASR, 19)), "eor x9, x10, x11, asr #19");
+ COMPARE(eor(w12, w13, Operand(w14, ROR, 20)), "eor w12, w13, w14, ror #20");
+
+ COMPARE(eon(w15, w16, Operand(w17)), "eon w15, w16, w17");
+ COMPARE(eon(x18, x19, Operand(x20, LSL, 21)), "eon x18, x19, x20, lsl #21");
+ COMPARE(eon(w21, w22, Operand(w23, LSR, 22)), "eon w21, w22, w23, lsr #22");
+ COMPARE(eon(x24, x25, Operand(x26, ASR, 23)), "eon x24, x25, x26, asr #23");
+ COMPARE(eon(w27, w28, Operand(w29, ROR, 24)), "eon w27, w28, w29, ror #24");
+
+ COMPARE(ands(w0, w1, Operand(w2)), "ands w0, w1, w2");
+ COMPARE(ands(x3, x4, Operand(x5, LSL, 1)), "ands x3, x4, x5, lsl #1");
+ COMPARE(ands(w6, w7, Operand(w8, LSR, 2)), "ands w6, w7, w8, lsr #2");
+ COMPARE(ands(x9, x10, Operand(x11, ASR, 3)), "ands x9, x10, x11, asr #3");
+ COMPARE(ands(w12, w13, Operand(w14, ROR, 4)), "ands w12, w13, w14, ror #4");
+
+ COMPARE(bics(w15, w16, Operand(w17)), "bics w15, w16, w17");
+ COMPARE(bics(x18, x19, Operand(x20, LSL, 5)), "bics x18, x19, x20, lsl #5");
+ COMPARE(bics(w21, w22, Operand(w23, LSR, 6)), "bics w21, w22, w23, lsr #6");
+ COMPARE(bics(x24, x25, Operand(x26, ASR, 7)), "bics x24, x25, x26, asr #7");
+ COMPARE(bics(w27, w28, Operand(w29, ROR, 8)), "bics w27, w28, w29, ror #8");
+
+ COMPARE(tst(w0, Operand(w1)), "tst w0, w1");
+ COMPARE(tst(w2, Operand(w3, ROR, 10)), "tst w2, w3, ror #10");
+ COMPARE(tst(x0, Operand(x1)), "tst x0, x1");
+ COMPARE(tst(x2, Operand(x3, ROR, 42)), "tst x2, x3, ror #42");
+
+ COMPARE(orn(w0, wzr, Operand(w1)), "mvn w0, w1");
+ COMPARE(orn(w2, wzr, Operand(w3, ASR, 5)), "mvn w2, w3, asr #5");
+ COMPARE(orn(x0, xzr, Operand(x1)), "mvn x0, x1");
+ COMPARE(orn(x2, xzr, Operand(x3, ASR, 42)), "mvn x2, x3, asr #42");
+
+ // The mov alias only applies when the operand is unshifted.
+ COMPARE(orr(w0, wzr, Operand(w1)), "mov w0, w1");
+ COMPARE(orr(x0, xzr, Operand(x1)), "mov x0, x1");
+ COMPARE(orr(w16, wzr, Operand(w17, LSL, 1)), "orr w16, wzr, w17, lsl #1");
+ COMPARE(orr(x16, xzr, Operand(x17, ASR, 2)), "orr x16, xzr, x17, asr #2");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of the two-source variable-shift instructions; the
+// lslv/lsrv/asrv/rorv encodings print with their lsl/lsr/asr/ror alias
+// names.
+TEST_(dp_2_source) {
+ SET_UP();
+
+ COMPARE(lslv(w0, w1, w2), "lsl w0, w1, w2");
+ COMPARE(lslv(x3, x4, x5), "lsl x3, x4, x5");
+ COMPARE(lsrv(w6, w7, w8), "lsr w6, w7, w8");
+ COMPARE(lsrv(x9, x10, x11), "lsr x9, x10, x11");
+ COMPARE(asrv(w12, w13, w14), "asr w12, w13, w14");
+ COMPARE(asrv(x15, x16, x17), "asr x15, x16, x17");
+ COMPARE(rorv(w18, w19, w20), "ror w18, w19, w20");
+ COMPARE(rorv(x21, x22, x23), "ror x21, x22, x23");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of adr with positive, negative and boundary
+// (+/-1MB range) immediates. COMPARE_PREFIX is used because the output
+// is followed by an address annotation that is not checked here.
+TEST_(adr) {
+ SET_UP();
+
+ COMPARE_PREFIX(adr(x0, 0), "adr x0, #+0x0");
+ COMPARE_PREFIX(adr(x1, 1), "adr x1, #+0x1");
+ COMPARE_PREFIX(adr(x2, -1), "adr x2, #-0x1");
+ COMPARE_PREFIX(adr(x3, 4), "adr x3, #+0x4");
+ COMPARE_PREFIX(adr(x4, -4), "adr x4, #-0x4");
+ COMPARE_PREFIX(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
+ COMPARE_PREFIX(adr(x6, -0x00100000), "adr x6, #-0x100000");
+ COMPARE_PREFIX(adr(xzr, 0), "adr xzr, #+0x0");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of branch instructions: unconditional (b, bl),
+// conditional (b.cond), compare-and-branch (cbz/cbnz), test-bit-and-branch
+// (tbz/tbnz) and register branches (br, blr, ret, including the bare
+// "ret" alias for ret(lr)). Branch immediates are instruction counts, so
+// INST_OFF converts the byte offsets used below.
+TEST_(branch) {
+ SET_UP();
+
+ #define INST_OFF(x) ((x) >> kInstructionSizeLog2)
+ COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
+ COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
+ COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
+ COMPARE_PREFIX(b(INST_OFF(-0x8000000)), "b #-0x8000000");
+ COMPARE_PREFIX(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
+ COMPARE_PREFIX(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
+ COMPARE_PREFIX(bl(INST_OFF(0x4)), "bl #+0x4");
+ COMPARE_PREFIX(bl(INST_OFF(-0x4)), "bl #-0x4");
+ COMPARE_PREFIX(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
+ COMPARE_PREFIX(bl(INST_OFF(-0x100000)), "bl #-0x100000");
+ COMPARE_PREFIX(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
+ COMPARE_PREFIX(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
+ COMPARE_PREFIX(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
+ COMPARE_PREFIX(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
+ // tbz/tbnz on bit numbers below 32 print the w form of the register.
+ COMPARE_PREFIX(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
+ COMPARE_PREFIX(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
+ COMPARE_PREFIX(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
+ COMPARE_PREFIX(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
+ COMPARE_PREFIX(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
+ COMPARE_PREFIX(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
+ COMPARE_PREFIX(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
+ COMPARE_PREFIX(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
+ COMPARE_PREFIX(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
+ COMPARE_PREFIX(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
+ COMPARE(br(x0), "br x0");
+ COMPARE(blr(x1), "blr x1");
+ COMPARE(ret(x2), "ret x2");
+ COMPARE(ret(lr), "ret");  // Added missing ';' for consistency with every other statement.
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of single-register word/doubleword loads and stores
+// (ldr/str/ldrsw) with unsigned scaled offsets (including the maximum
+// encodable values), pre-index and post-index addressing, and jssp-based
+// addresses.
+TEST_(load_store) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1)), "ldr w0, [x1]");
+ COMPARE(ldr(w2, MemOperand(x3, 4)), "ldr w2, [x3, #4]");
+ COMPARE(ldr(w4, MemOperand(x5, 16380)), "ldr w4, [x5, #16380]");
+ COMPARE(ldr(x6, MemOperand(x7)), "ldr x6, [x7]");
+ COMPARE(ldr(x8, MemOperand(x9, 8)), "ldr x8, [x9, #8]");
+ COMPARE(ldr(x10, MemOperand(x11, 32760)), "ldr x10, [x11, #32760]");
+ COMPARE(str(w12, MemOperand(x13)), "str w12, [x13]");
+ COMPARE(str(w14, MemOperand(x15, 4)), "str w14, [x15, #4]");
+ COMPARE(str(w16, MemOperand(x17, 16380)), "str w16, [x17, #16380]");
+ COMPARE(str(x18, MemOperand(x19)), "str x18, [x19]");
+ COMPARE(str(x20, MemOperand(x21, 8)), "str x20, [x21, #8]");
+ COMPARE(str(x22, MemOperand(x23, 32760)), "str x22, [x23, #32760]");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PreIndex)), "ldr w0, [x1, #4]!");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PreIndex)), "ldr w2, [x3, #255]!");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PreIndex)), "ldr w4, [x5, #-256]!");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PreIndex)), "ldr x6, [x7, #8]!");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PreIndex)), "ldr x8, [x9, #255]!");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PreIndex)), "ldr x10, [x11, #-256]!");
+ COMPARE(str(w12, MemOperand(x13, 4, PreIndex)), "str w12, [x13, #4]!");
+ COMPARE(str(w14, MemOperand(x15, 255, PreIndex)), "str w14, [x15, #255]!");
+ COMPARE(str(w16, MemOperand(x17, -256, PreIndex)), "str w16, [x17, #-256]!");
+ COMPARE(str(x18, MemOperand(x19, 8, PreIndex)), "str x18, [x19, #8]!");
+ COMPARE(str(x20, MemOperand(x21, 255, PreIndex)), "str x20, [x21, #255]!");
+ COMPARE(str(x22, MemOperand(x23, -256, PreIndex)), "str x22, [x23, #-256]!");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PostIndex)), "ldr w0, [x1], #4");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PostIndex)), "ldr w2, [x3], #255");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PostIndex)), "ldr w4, [x5], #-256");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PostIndex)), "ldr x6, [x7], #8");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PostIndex)), "ldr x8, [x9], #255");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PostIndex)), "ldr x10, [x11], #-256");
+ COMPARE(str(w12, MemOperand(x13, 4, PostIndex)), "str w12, [x13], #4");
+ COMPARE(str(w14, MemOperand(x15, 255, PostIndex)), "str w14, [x15], #255");
+ COMPARE(str(w16, MemOperand(x17, -256, PostIndex)), "str w16, [x17], #-256");
+ COMPARE(str(x18, MemOperand(x19, 8, PostIndex)), "str x18, [x19], #8");
+ COMPARE(str(x20, MemOperand(x21, 255, PostIndex)), "str x20, [x21], #255");
+ COMPARE(str(x22, MemOperand(x23, -256, PostIndex)), "str x22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(w24, MemOperand(jssp)), "ldr w24, [jssp]");
+ COMPARE(ldr(x25, MemOperand(jssp, 8)), "ldr x25, [jssp, #8]");
+ COMPARE(str(w26, MemOperand(jssp, 4, PreIndex)), "str w26, [jssp, #4]!");
+ COMPARE(str(cp, MemOperand(jssp, -8, PostIndex)), "str cp, [jssp], #-8");
+
+ COMPARE(ldrsw(x0, MemOperand(x1)), "ldrsw x0, [x1]");
+ COMPARE(ldrsw(x2, MemOperand(x3, 8)), "ldrsw x2, [x3, #8]");
+ COMPARE(ldrsw(x4, MemOperand(x5, 42, PreIndex)), "ldrsw x4, [x5, #42]!");
+ COMPARE(ldrsw(x6, MemOperand(x7, -11, PostIndex)), "ldrsw x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of loads and stores with register-offset addressing:
+// every extend/shift mode (uxtw, sxtw, sxtx, lsl, plain x-register) with
+// and without a scale amount, for word, doubleword, byte and half-word
+// accesses.
+TEST_(load_store_regoffset) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]");
+ COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]");
+ COMPARE(ldr(w6, MemOperand(x7, x8)), "ldr w6, [x7, x8]");
+ COMPARE(ldr(w9, MemOperand(x10, x11, LSL, 2)), "ldr w9, [x10, x11, lsl #2]");
+ COMPARE(ldr(w12, MemOperand(x13, w14, SXTW)), "ldr w12, [x13, w14, sxtw]");
+ COMPARE(ldr(w15, MemOperand(x16, w17, SXTW, 2)),
+ "ldr w15, [x16, w17, sxtw #2]");
+ COMPARE(ldr(w18, MemOperand(x19, x20, SXTX)), "ldr w18, [x19, x20, sxtx]");
+ COMPARE(ldr(w21, MemOperand(x22, x23, SXTX, 2)),
+ "ldr w21, [x22, x23, sxtx #2]");
+ COMPARE(ldr(x0, MemOperand(x1, w2, UXTW)), "ldr x0, [x1, w2, uxtw]");
+ COMPARE(ldr(x3, MemOperand(x4, w5, UXTW, 3)), "ldr x3, [x4, w5, uxtw #3]");
+ COMPARE(ldr(x6, MemOperand(x7, x8)), "ldr x6, [x7, x8]");
+ COMPARE(ldr(x9, MemOperand(x10, x11, LSL, 3)), "ldr x9, [x10, x11, lsl #3]");
+ COMPARE(ldr(x12, MemOperand(x13, w14, SXTW)), "ldr x12, [x13, w14, sxtw]");
+ COMPARE(ldr(x15, MemOperand(x16, w17, SXTW, 3)),
+ "ldr x15, [x16, w17, sxtw #3]");
+ COMPARE(ldr(x18, MemOperand(x19, x20, SXTX)), "ldr x18, [x19, x20, sxtx]");
+ COMPARE(ldr(x21, MemOperand(x22, x23, SXTX, 3)),
+ "ldr x21, [x22, x23, sxtx #3]");
+
+ COMPARE(str(w0, MemOperand(x1, w2, UXTW)), "str w0, [x1, w2, uxtw]");
+ COMPARE(str(w3, MemOperand(x4, w5, UXTW, 2)), "str w3, [x4, w5, uxtw #2]");
+ COMPARE(str(w6, MemOperand(x7, x8)), "str w6, [x7, x8]");
+ COMPARE(str(w9, MemOperand(x10, x11, LSL, 2)), "str w9, [x10, x11, lsl #2]");
+ COMPARE(str(w12, MemOperand(x13, w14, SXTW)), "str w12, [x13, w14, sxtw]");
+ COMPARE(str(w15, MemOperand(x16, w17, SXTW, 2)),
+ "str w15, [x16, w17, sxtw #2]");
+ COMPARE(str(w18, MemOperand(x19, x20, SXTX)), "str w18, [x19, x20, sxtx]");
+ COMPARE(str(w21, MemOperand(x22, x23, SXTX, 2)),
+ "str w21, [x22, x23, sxtx #2]");
+ COMPARE(str(x0, MemOperand(x1, w2, UXTW)), "str x0, [x1, w2, uxtw]");
+ COMPARE(str(x3, MemOperand(x4, w5, UXTW, 3)), "str x3, [x4, w5, uxtw #3]");
+ COMPARE(str(x6, MemOperand(x7, x8)), "str x6, [x7, x8]");
+ COMPARE(str(x9, MemOperand(x10, x11, LSL, 3)), "str x9, [x10, x11, lsl #3]");
+ COMPARE(str(x12, MemOperand(x13, w14, SXTW)), "str x12, [x13, w14, sxtw]");
+ COMPARE(str(x15, MemOperand(x16, w17, SXTW, 3)),
+ "str x15, [x16, w17, sxtw #3]");
+ COMPARE(str(x18, MemOperand(x19, x20, SXTX)), "str x18, [x19, x20, sxtx]");
+ COMPARE(str(x21, MemOperand(x22, x23, SXTX, 3)),
+ "str x21, [x22, x23, sxtx #3]");
+
+ COMPARE(ldrb(w0, MemOperand(x1, w2, UXTW)), "ldrb w0, [x1, w2, uxtw]");
+ COMPARE(ldrb(w6, MemOperand(x7, x8)), "ldrb w6, [x7, x8]");
+ COMPARE(ldrb(w12, MemOperand(x13, w14, SXTW)), "ldrb w12, [x13, w14, sxtw]");
+ COMPARE(ldrb(w18, MemOperand(x19, x20, SXTX)), "ldrb w18, [x19, x20, sxtx]");
+ COMPARE(strb(w0, MemOperand(x1, w2, UXTW)), "strb w0, [x1, w2, uxtw]");
+ COMPARE(strb(w6, MemOperand(x7, x8)), "strb w6, [x7, x8]");
+ COMPARE(strb(w12, MemOperand(x13, w14, SXTW)), "strb w12, [x13, w14, sxtw]");
+ COMPARE(strb(w18, MemOperand(x19, x20, SXTX)), "strb w18, [x19, x20, sxtx]");
+
+ COMPARE(ldrh(w0, MemOperand(x1, w2, UXTW)), "ldrh w0, [x1, w2, uxtw]");
+ COMPARE(ldrh(w3, MemOperand(x4, w5, UXTW, 1)), "ldrh w3, [x4, w5, uxtw #1]");
+ COMPARE(ldrh(w6, MemOperand(x7, x8)), "ldrh w6, [x7, x8]");
+ COMPARE(ldrh(w9, MemOperand(x10, x11, LSL, 1)),
+ "ldrh w9, [x10, x11, lsl #1]");
+ COMPARE(ldrh(w12, MemOperand(x13, w14, SXTW)), "ldrh w12, [x13, w14, sxtw]");
+ COMPARE(ldrh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "ldrh w15, [x16, w17, sxtw #1]");
+ COMPARE(ldrh(w18, MemOperand(x19, x20, SXTX)), "ldrh w18, [x19, x20, sxtx]");
+ COMPARE(ldrh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "ldrh w21, [x22, x23, sxtx #1]");
+ COMPARE(strh(w0, MemOperand(x1, w2, UXTW)), "strh w0, [x1, w2, uxtw]");
+ COMPARE(strh(w3, MemOperand(x4, w5, UXTW, 1)), "strh w3, [x4, w5, uxtw #1]");
+ COMPARE(strh(w6, MemOperand(x7, x8)), "strh w6, [x7, x8]");
+ COMPARE(strh(w9, MemOperand(x10, x11, LSL, 1)),
+ "strh w9, [x10, x11, lsl #1]");
+ COMPARE(strh(w12, MemOperand(x13, w14, SXTW)), "strh w12, [x13, w14, sxtw]");
+ COMPARE(strh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "strh w15, [x16, w17, sxtw #1]");
+ COMPARE(strh(w18, MemOperand(x19, x20, SXTX)), "strh w18, [x19, x20, sxtx]");
+ COMPARE(strh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "strh w21, [x22, x23, sxtx #1]");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(x0, MemOperand(jssp, wzr, SXTW)), "ldr x0, [jssp, wzr, sxtw]");
+ COMPARE(str(x1, MemOperand(jssp, xzr)), "str x1, [jssp, xzr]");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of byte loads and stores (ldrb/strb/ldrsb) with
+// immediate, pre-index and post-index addressing. The expected strings
+// show that an x-register operand to ldrb/strb is printed in its w form.
+TEST_(load_store_byte) {
+ SET_UP();
+
+ COMPARE(ldrb(w0, MemOperand(x1)), "ldrb w0, [x1]");
+ COMPARE(ldrb(x2, MemOperand(x3)), "ldrb w2, [x3]");
+ COMPARE(ldrb(w4, MemOperand(x5, 4095)), "ldrb w4, [x5, #4095]");
+ COMPARE(ldrb(w6, MemOperand(x7, 255, PreIndex)), "ldrb w6, [x7, #255]!");
+ COMPARE(ldrb(w8, MemOperand(x9, -256, PreIndex)), "ldrb w8, [x9, #-256]!");
+ COMPARE(ldrb(w10, MemOperand(x11, 255, PostIndex)), "ldrb w10, [x11], #255");
+ COMPARE(ldrb(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrb w12, [x13], #-256");
+ COMPARE(strb(w14, MemOperand(x15)), "strb w14, [x15]");
+ COMPARE(strb(x16, MemOperand(x17)), "strb w16, [x17]");
+ COMPARE(strb(w18, MemOperand(x19, 4095)), "strb w18, [x19, #4095]");
+ COMPARE(strb(w20, MemOperand(x21, 255, PreIndex)), "strb w20, [x21, #255]!");
+ COMPARE(strb(w22, MemOperand(x23, -256, PreIndex)),
+ "strb w22, [x23, #-256]!");
+ COMPARE(strb(w24, MemOperand(x25, 255, PostIndex)), "strb w24, [x25], #255");
+ COMPARE(strb(w26, MemOperand(cp, -256, PostIndex)),
+ "strb w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrb(w28, MemOperand(jssp, 3, PostIndex)), "ldrb w28, [jssp], #3");
+ COMPARE(strb(fp, MemOperand(jssp, -42, PreIndex)), "strb w29, [jssp, #-42]!");
+ COMPARE(ldrsb(w0, MemOperand(x1)), "ldrsb w0, [x1]");
+ COMPARE(ldrsb(x2, MemOperand(x3, 8)), "ldrsb x2, [x3, #8]");
+ COMPARE(ldrsb(w4, MemOperand(x5, 42, PreIndex)), "ldrsb w4, [x5, #42]!");
+ COMPARE(ldrsb(x6, MemOperand(x7, -11, PostIndex)), "ldrsb x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of half-word loads and stores (ldrh/strh/ldrsh) with
+// immediate, pre-index and post-index addressing. Offsets that cannot be
+// encoded as scaled unsigned immediates (e.g. unaligned #255, #-256)
+// disassemble as the unscaled ldurh/sturh forms.
+TEST_(load_store_half) {
+ SET_UP();
+
+ COMPARE(ldrh(w0, MemOperand(x1)), "ldrh w0, [x1]");
+ COMPARE(ldrh(x2, MemOperand(x3)), "ldrh w2, [x3]");
+ COMPARE(ldrh(w4, MemOperand(x5, 8190)), "ldrh w4, [x5, #8190]");
+ COMPARE(ldrh(w6, MemOperand(x7, 255, PreIndex)), "ldrh w6, [x7, #255]!");
+ COMPARE(ldrh(w8, MemOperand(x9, -256, PreIndex)), "ldrh w8, [x9, #-256]!");
+ COMPARE(ldrh(w10, MemOperand(x11, 255, PostIndex)), "ldrh w10, [x11], #255");
+ COMPARE(ldrh(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrh w12, [x13], #-256");
+ COMPARE(strh(w14, MemOperand(x15)), "strh w14, [x15]");
+ COMPARE(strh(x16, MemOperand(x17)), "strh w16, [x17]");
+ COMPARE(strh(w18, MemOperand(x19, 8190)), "strh w18, [x19, #8190]");
+ COMPARE(strh(w20, MemOperand(x21, 255, PreIndex)), "strh w20, [x21, #255]!");
+ COMPARE(strh(w22, MemOperand(x23, -256, PreIndex)),
+ "strh w22, [x23, #-256]!");
+ COMPARE(strh(w24, MemOperand(x25, 255, PostIndex)), "strh w24, [x25], #255");
+ COMPARE(strh(w26, MemOperand(cp, -256, PostIndex)),
+ "strh w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrh(w28, MemOperand(jssp, 3, PostIndex)), "ldrh w28, [jssp], #3");
+ COMPARE(strh(fp, MemOperand(jssp, -42, PreIndex)), "strh w29, [jssp, #-42]!");
+ COMPARE(ldrh(w30, MemOperand(x0, 255)), "ldurh w30, [x0, #255]");
+ COMPARE(ldrh(x1, MemOperand(x2, -256)), "ldurh w1, [x2, #-256]");
+ COMPARE(strh(w3, MemOperand(x4, 255)), "sturh w3, [x4, #255]");
+ COMPARE(strh(x5, MemOperand(x6, -256)), "sturh w5, [x6, #-256]");
+ COMPARE(ldrsh(w0, MemOperand(x1)), "ldrsh w0, [x1]");
+ COMPARE(ldrsh(w2, MemOperand(x3, 8)), "ldrsh w2, [x3, #8]");
+ COMPARE(ldrsh(w4, MemOperand(x5, 42, PreIndex)), "ldrsh w4, [x5, #42]!");
+ COMPARE(ldrsh(x6, MemOperand(x7, -11, PostIndex)), "ldrsh x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of floating-point register loads and stores (single
+// precision s-registers and double precision d-registers) with unsigned
+// scaled offsets, pre-index and post-index addressing, and jssp-based
+// addresses.
+TEST_(load_store_fp) {
+ SET_UP();
+
+ COMPARE(ldr(s0, MemOperand(x1)), "ldr s0, [x1]");
+ COMPARE(ldr(s2, MemOperand(x3, 4)), "ldr s2, [x3, #4]");
+ COMPARE(ldr(s4, MemOperand(x5, 16380)), "ldr s4, [x5, #16380]");
+ COMPARE(ldr(d6, MemOperand(x7)), "ldr d6, [x7]");
+ COMPARE(ldr(d8, MemOperand(x9, 8)), "ldr d8, [x9, #8]");
+ COMPARE(ldr(d10, MemOperand(x11, 32760)), "ldr d10, [x11, #32760]");
+ COMPARE(str(s12, MemOperand(x13)), "str s12, [x13]");
+ COMPARE(str(s14, MemOperand(x15, 4)), "str s14, [x15, #4]");
+ COMPARE(str(s16, MemOperand(x17, 16380)), "str s16, [x17, #16380]");
+ COMPARE(str(d18, MemOperand(x19)), "str d18, [x19]");
+ COMPARE(str(d20, MemOperand(x21, 8)), "str d20, [x21, #8]");
+ COMPARE(str(d22, MemOperand(x23, 32760)), "str d22, [x23, #32760]");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PreIndex)), "ldr s0, [x1, #4]!");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PreIndex)), "ldr s2, [x3, #255]!");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PreIndex)), "ldr s4, [x5, #-256]!");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PreIndex)), "ldr d6, [x7, #8]!");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PreIndex)), "ldr d8, [x9, #255]!");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PreIndex)), "ldr d10, [x11, #-256]!");
+ COMPARE(str(s12, MemOperand(x13, 4, PreIndex)), "str s12, [x13, #4]!");
+ COMPARE(str(s14, MemOperand(x15, 255, PreIndex)), "str s14, [x15, #255]!");
+ COMPARE(str(s16, MemOperand(x17, -256, PreIndex)), "str s16, [x17, #-256]!");
+ COMPARE(str(d18, MemOperand(x19, 8, PreIndex)), "str d18, [x19, #8]!");
+ COMPARE(str(d20, MemOperand(x21, 255, PreIndex)), "str d20, [x21, #255]!");
+ COMPARE(str(d22, MemOperand(x23, -256, PreIndex)), "str d22, [x23, #-256]!");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PostIndex)), "ldr s0, [x1], #4");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PostIndex)), "ldr s2, [x3], #255");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PostIndex)), "ldr s4, [x5], #-256");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PostIndex)), "ldr d6, [x7], #8");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PostIndex)), "ldr d8, [x9], #255");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PostIndex)), "ldr d10, [x11], #-256");
+ COMPARE(str(s12, MemOperand(x13, 4, PostIndex)), "str s12, [x13], #4");
+ COMPARE(str(s14, MemOperand(x15, 255, PostIndex)), "str s14, [x15], #255");
+ COMPARE(str(s16, MemOperand(x17, -256, PostIndex)), "str s16, [x17], #-256");
+ COMPARE(str(d18, MemOperand(x19, 8, PostIndex)), "str d18, [x19], #8");
+ COMPARE(str(d20, MemOperand(x21, 255, PostIndex)), "str d20, [x21], #255");
+ COMPARE(str(d22, MemOperand(x23, -256, PostIndex)), "str d22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(s24, MemOperand(jssp)), "ldr s24, [jssp]");
+ COMPARE(ldr(d25, MemOperand(jssp, 8)), "ldr d25, [jssp, #8]");
+ COMPARE(str(s26, MemOperand(jssp, 4, PreIndex)), "str s26, [jssp, #4]!");
+ COMPARE(str(d27, MemOperand(jssp, -8, PostIndex)), "str d27, [jssp], #-8");
+
+ CLEANUP();
+}
+
+
+// Verify disassembly of unscaled-offset loads and stores: offsets that
+// are unaligned or negative (within [-256, 255]) disassemble as the
+// ldur/stur family (ldur, stur, ldurb, ldursb, ldurh, ldursh, ldursw).
+TEST_(load_store_unscaled) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1, 1)), "ldur w0, [x1, #1]");
+ COMPARE(ldr(w2, MemOperand(x3, -1)), "ldur w2, [x3, #-1]");
+ COMPARE(ldr(w4, MemOperand(x5, 255)), "ldur w4, [x5, #255]");
+ COMPARE(ldr(w6, MemOperand(x7, -256)), "ldur w6, [x7, #-256]");
+ COMPARE(ldr(x8, MemOperand(x9, 1)), "ldur x8, [x9, #1]");
+ COMPARE(ldr(x10, MemOperand(x11, -1)), "ldur x10, [x11, #-1]");
+ COMPARE(ldr(x12, MemOperand(x13, 255)), "ldur x12, [x13, #255]");
+ COMPARE(ldr(x14, MemOperand(x15, -256)), "ldur x14, [x15, #-256]");
+ COMPARE(str(w16, MemOperand(x17, 1)), "stur w16, [x17, #1]");
+ COMPARE(str(w18, MemOperand(x19, -1)), "stur w18, [x19, #-1]");
+ COMPARE(str(w20, MemOperand(x21, 255)), "stur w20, [x21, #255]");
+ COMPARE(str(w22, MemOperand(x23, -256)), "stur w22, [x23, #-256]");
+ COMPARE(str(x24, MemOperand(x25, 1)), "stur x24, [x25, #1]");
+ COMPARE(str(x26, MemOperand(cp, -1)), "stur x26, [cp, #-1]");
+ COMPARE(str(jssp, MemOperand(fp, 255)), "stur jssp, [fp, #255]");
+ COMPARE(str(lr, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
+ COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
+ COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
+ COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
+ COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
+ COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
+ COMPARE(ldrh(w8, MemOperand(x9, -5)), "ldurh w8, [x9, #-5]");
+ COMPARE(ldrsh(w10, MemOperand(x11, -6)), "ldursh w10, [x11, #-6]");
+ COMPARE(ldrsh(x12, MemOperand(x13, -7)), "ldursh x12, [x13, #-7]");
+ COMPARE(ldrsw(x14, MemOperand(x15, -8)), "ldursw x14, [x15, #-8]");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair) {
+ SET_UP();
+
+ COMPARE(ldp(w0, w1, MemOperand(x2)), "ldp w0, w1, [x2]");
+ COMPARE(ldp(x3, x4, MemOperand(x5)), "ldp x3, x4, [x5]");
+ COMPARE(ldp(w6, w7, MemOperand(x8, 4)), "ldp w6, w7, [x8, #4]");
+ COMPARE(ldp(x9, x10, MemOperand(x11, 8)), "ldp x9, x10, [x11, #8]");
+ COMPARE(ldp(w12, w13, MemOperand(x14, 252)), "ldp w12, w13, [x14, #252]");
+ COMPARE(ldp(x15, x16, MemOperand(x17, 504)), "ldp x15, x16, [x17, #504]");
+ COMPARE(ldp(w18, w19, MemOperand(x20, -256)), "ldp w18, w19, [x20, #-256]");
+ COMPARE(ldp(x21, x22, MemOperand(x23, -512)), "ldp x21, x22, [x23, #-512]");
+ COMPARE(ldp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "ldp w24, w25, [x26, #252]!");
+ COMPARE(ldp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "ldp cp, jssp, [fp, #504]!");
+ COMPARE(ldp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "ldp w30, w0, [x1, #-256]!");
+ COMPARE(ldp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "ldp x2, x3, [x4, #-512]!");
+ COMPARE(ldp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "ldp w5, w6, [x7], #252");
+ COMPARE(ldp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "ldp x8, x9, [x10], #504");
+ COMPARE(ldp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "ldp w11, w12, [x13], #-256");
+ COMPARE(ldp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "ldp x14, x15, [x16], #-512");
+
+ COMPARE(ldp(s17, s18, MemOperand(x19)), "ldp s17, s18, [x19]");
+ COMPARE(ldp(s20, s21, MemOperand(x22, 252)), "ldp s20, s21, [x22, #252]");
+ COMPARE(ldp(s23, s24, MemOperand(x25, -256)), "ldp s23, s24, [x25, #-256]");
+ COMPARE(ldp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "ldp s26, s27, [jssp, #252]!");
+ COMPARE(ldp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "ldp s29, s30, [fp, #-256]!");
+ COMPARE(ldp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "ldp s31, s0, [x1], #252");
+ COMPARE(ldp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "ldp s2, s3, [x4], #-256");
+ COMPARE(ldp(d17, d18, MemOperand(x19)), "ldp d17, d18, [x19]");
+ COMPARE(ldp(d20, d21, MemOperand(x22, 504)), "ldp d20, d21, [x22, #504]");
+ COMPARE(ldp(d23, d24, MemOperand(x25, -512)), "ldp d23, d24, [x25, #-512]");
+ COMPARE(ldp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "ldp d26, d27, [jssp, #504]!");
+ COMPARE(ldp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "ldp d29, d30, [fp, #-512]!");
+ COMPARE(ldp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "ldp d31, d0, [x1], #504");
+ COMPARE(ldp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "ldp d2, d3, [x4], #-512");
+
+ COMPARE(stp(w0, w1, MemOperand(x2)), "stp w0, w1, [x2]");
+ COMPARE(stp(x3, x4, MemOperand(x5)), "stp x3, x4, [x5]");
+ COMPARE(stp(w6, w7, MemOperand(x8, 4)), "stp w6, w7, [x8, #4]");
+ COMPARE(stp(x9, x10, MemOperand(x11, 8)), "stp x9, x10, [x11, #8]");
+ COMPARE(stp(w12, w13, MemOperand(x14, 252)), "stp w12, w13, [x14, #252]");
+ COMPARE(stp(x15, x16, MemOperand(x17, 504)), "stp x15, x16, [x17, #504]");
+ COMPARE(stp(w18, w19, MemOperand(x20, -256)), "stp w18, w19, [x20, #-256]");
+ COMPARE(stp(x21, x22, MemOperand(x23, -512)), "stp x21, x22, [x23, #-512]");
+ COMPARE(stp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "stp w24, w25, [x26, #252]!");
+ COMPARE(stp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "stp cp, jssp, [fp, #504]!");
+ COMPARE(stp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "stp w30, w0, [x1, #-256]!");
+ COMPARE(stp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "stp x2, x3, [x4, #-512]!");
+ COMPARE(stp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "stp w5, w6, [x7], #252");
+ COMPARE(stp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "stp x8, x9, [x10], #504");
+ COMPARE(stp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "stp w11, w12, [x13], #-256");
+ COMPARE(stp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "stp x14, x15, [x16], #-512");
+
+ COMPARE(stp(s17, s18, MemOperand(x19)), "stp s17, s18, [x19]");
+ COMPARE(stp(s20, s21, MemOperand(x22, 252)), "stp s20, s21, [x22, #252]");
+ COMPARE(stp(s23, s24, MemOperand(x25, -256)), "stp s23, s24, [x25, #-256]");
+ COMPARE(stp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "stp s26, s27, [jssp, #252]!");
+ COMPARE(stp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "stp s29, s30, [fp, #-256]!");
+ COMPARE(stp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "stp s31, s0, [x1], #252");
+ COMPARE(stp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "stp s2, s3, [x4], #-256");
+ COMPARE(stp(d17, d18, MemOperand(x19)), "stp d17, d18, [x19]");
+ COMPARE(stp(d20, d21, MemOperand(x22, 504)), "stp d20, d21, [x22, #504]");
+ COMPARE(stp(d23, d24, MemOperand(x25, -512)), "stp d23, d24, [x25, #-512]");
+ COMPARE(stp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "stp d26, d27, [jssp, #504]!");
+ COMPARE(stp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "stp d29, d30, [fp, #-512]!");
+ COMPARE(stp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "stp d31, d0, [x1], #504");
+ COMPARE(stp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "stp d2, d3, [x4], #-512");
+
+ // TODO(all): Update / Restore this test.
+ COMPARE(ldp(w16, w17, MemOperand(jssp, 4, PostIndex)),
+ "ldp w16, w17, [jssp], #4");
+ COMPARE(stp(x18, x19, MemOperand(jssp, -8, PreIndex)),
+ "stp x18, x19, [jssp, #-8]!");
+ COMPARE(ldp(s30, s31, MemOperand(jssp, 12, PostIndex)),
+ "ldp s30, s31, [jssp], #12");
+ COMPARE(stp(d30, d31, MemOperand(jssp, -16)),
+ "stp d30, d31, [jssp, #-16]");
+
+ COMPARE(ldpsw(x0, x1, MemOperand(x2)), "ldpsw x0, x1, [x2]");
+ COMPARE(ldpsw(x3, x4, MemOperand(x5, 16)), "ldpsw x3, x4, [x5, #16]");
+ COMPARE(ldpsw(x6, x7, MemOperand(x8, -32, PreIndex)),
+ "ldpsw x6, x7, [x8, #-32]!");
+ COMPARE(ldpsw(x9, x10, MemOperand(x11, 128, PostIndex)),
+ "ldpsw x9, x10, [x11], #128");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair_nontemp) {
+ SET_UP();
+
+ COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
+ COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
+ COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
+ COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
+ COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
+ COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
+ COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
+ COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
+ COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
+ COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
+ COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
+ COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");
+
+ CLEANUP();
+}
+
+#if 0 // TODO(all): enable.
+TEST_(load_literal) {
+ SET_UP();
+
+ COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
+ COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
+ COMPARE_PREFIX(ldr(d11, 1.234), "ldr d11, pc+8");
+ COMPARE_PREFIX(ldr(s22, 2.5f), "ldr s22, pc+8");
+
+ CLEANUP();
+}
+#endif
+
+TEST_(cond_select) {
+ SET_UP();
+
+ COMPARE(csel(w0, w1, w2, eq), "csel w0, w1, w2, eq");
+ COMPARE(csel(x3, x4, x5, ne), "csel x3, x4, x5, ne");
+ COMPARE(csinc(w6, w7, w8, hs), "csinc w6, w7, w8, hs");
+ COMPARE(csinc(x9, x10, x11, lo), "csinc x9, x10, x11, lo");
+ COMPARE(csinv(w12, w13, w14, mi), "csinv w12, w13, w14, mi");
+ COMPARE(csinv(x15, x16, x17, pl), "csinv x15, x16, x17, pl");
+ COMPARE(csneg(w18, w19, w20, vs), "csneg w18, w19, w20, vs");
+ COMPARE(csneg(x21, x22, x23, vc), "csneg x21, x22, x23, vc");
+ COMPARE(cset(w24, hi), "cset w24, hi");
+ COMPARE(cset(x25, ls), "cset x25, ls");
+ COMPARE(csetm(w26, ge), "csetm w26, ge");
+ COMPARE(csetm(cp, lt), "csetm cp, lt");
+ COMPARE(cinc(w28, w29, gt), "cinc w28, w29, gt");
+ COMPARE(cinc(lr, x0, le), "cinc lr, x0, le");
+ COMPARE(cinv(w1, w2, eq), "cinv w1, w2, eq");
+ COMPARE(cinv(x3, x4, ne), "cinv x3, x4, ne");
+ COMPARE(cneg(w5, w6, hs), "cneg w5, w6, hs");
+ COMPARE(cneg(x7, x8, lo), "cneg x7, x8, lo");
+
+ COMPARE(csel(x0, x1, x2, al), "csel x0, x1, x2, al");
+ COMPARE(csel(x1, x2, x3, nv), "csel x1, x2, x3, nv");
+ COMPARE(csinc(x2, x3, x4, al), "csinc x2, x3, x4, al");
+ COMPARE(csinc(x3, x4, x5, nv), "csinc x3, x4, x5, nv");
+ COMPARE(csinv(x4, x5, x6, al), "csinv x4, x5, x6, al");
+ COMPARE(csinv(x5, x6, x7, nv), "csinv x5, x6, x7, nv");
+ COMPARE(csneg(x6, x7, x8, al), "csneg x6, x7, x8, al");
+ COMPARE(csneg(x7, x8, x9, nv), "csneg x7, x8, x9, nv");
+
+ CLEANUP();
+}
+
+
+TEST(cond_select_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
+ COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
+ COMPARE(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
+ COMPARE(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
+ COMPARE(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
+ COMPARE(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp) {
+ SET_UP();
+
+ COMPARE(ccmn(w0, w1, NZCVFlag, eq), "ccmn w0, w1, #NZCV, eq");
+ COMPARE(ccmn(x2, x3, NZCFlag, ne), "ccmn x2, x3, #NZCv, ne");
+ COMPARE(ccmp(w4, w5, NZVFlag, hs), "ccmp w4, w5, #NZcV, hs");
+ COMPARE(ccmp(x6, x7, NZFlag, lo), "ccmp x6, x7, #NZcv, lo");
+ COMPARE(ccmn(w8, 31, NFlag, mi), "ccmn w8, #31, #Nzcv, mi");
+ COMPARE(ccmn(x9, 30, NCFlag, pl), "ccmn x9, #30, #NzCv, pl");
+ COMPARE(ccmp(w10, 29, NVFlag, vs), "ccmp w10, #29, #NzcV, vs");
+ COMPARE(ccmp(x11, 28, NFlag, vc), "ccmp x11, #28, #Nzcv, vc");
+ COMPARE(ccmn(w12, w13, NoFlag, al), "ccmn w12, w13, #nzcv, al");
+ COMPARE(ccmp(x14, 27, ZVFlag, nv), "ccmp x14, #27, #nZcV, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
+ COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
+ COMPARE(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
+ COMPARE(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_imm) {
+ SET_UP();
+
+ COMPARE(fmov(s0, 1.0f), "fmov s0, #0x70 (1.0000)");
+ COMPARE(fmov(s31, -13.0f), "fmov s31, #0xaa (-13.0000)");
+ COMPARE(fmov(d1, 1.0), "fmov d1, #0x70 (1.0000)");
+ COMPARE(fmov(d29, -13.0), "fmov d29, #0xaa (-13.0000)");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_reg) {
+ SET_UP();
+
+ COMPARE(fmov(w3, s13), "fmov w3, s13");
+ COMPARE(fmov(x6, d26), "fmov x6, d26");
+ COMPARE(fmov(s11, w30), "fmov s11, w30");
+ COMPARE(fmov(d31, x2), "fmov d31, x2");
+ COMPARE(fmov(s12, s13), "fmov s12, s13");
+ COMPARE(fmov(d22, d23), "fmov d22, d23");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp1) {
+ SET_UP();
+
+ COMPARE(fabs(s0, s1), "fabs s0, s1");
+ COMPARE(fabs(s31, s30), "fabs s31, s30");
+ COMPARE(fabs(d2, d3), "fabs d2, d3");
+ COMPARE(fabs(d31, d30), "fabs d31, d30");
+ COMPARE(fneg(s4, s5), "fneg s4, s5");
+ COMPARE(fneg(s31, s30), "fneg s31, s30");
+ COMPARE(fneg(d6, d7), "fneg d6, d7");
+ COMPARE(fneg(d31, d30), "fneg d31, d30");
+ COMPARE(fsqrt(s8, s9), "fsqrt s8, s9");
+ COMPARE(fsqrt(s31, s30), "fsqrt s31, s30");
+ COMPARE(fsqrt(d10, d11), "fsqrt d10, d11");
+ COMPARE(fsqrt(d31, d30), "fsqrt d31, d30");
+ COMPARE(frinta(s10, s11), "frinta s10, s11");
+ COMPARE(frinta(s31, s30), "frinta s31, s30");
+ COMPARE(frinta(d12, d13), "frinta d12, d13");
+ COMPARE(frinta(d31, d30), "frinta d31, d30");
+ COMPARE(frintn(s10, s11), "frintn s10, s11");
+ COMPARE(frintn(s31, s30), "frintn s31, s30");
+ COMPARE(frintn(d12, d13), "frintn d12, d13");
+ COMPARE(frintn(d31, d30), "frintn d31, d30");
+ COMPARE(frintz(s10, s11), "frintz s10, s11");
+ COMPARE(frintz(s31, s30), "frintz s31, s30");
+ COMPARE(frintz(d12, d13), "frintz d12, d13");
+ COMPARE(frintz(d31, d30), "frintz d31, d30");
+ COMPARE(fcvt(d14, s15), "fcvt d14, s15");
+ COMPARE(fcvt(d31, s31), "fcvt d31, s31");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp2) {
+ SET_UP();
+
+ COMPARE(fadd(s0, s1, s2), "fadd s0, s1, s2");
+ COMPARE(fadd(d3, d4, d5), "fadd d3, d4, d5");
+ COMPARE(fsub(s31, s30, s29), "fsub s31, s30, s29");
+ COMPARE(fsub(d31, d30, d29), "fsub d31, d30, d29");
+ COMPARE(fmul(s7, s8, s9), "fmul s7, s8, s9");
+ COMPARE(fmul(d10, d11, d12), "fmul d10, d11, d12");
+ COMPARE(fdiv(s13, s14, s15), "fdiv s13, s14, s15");
+ COMPARE(fdiv(d16, d17, d18), "fdiv d16, d17, d18");
+ COMPARE(fmax(s19, s20, s21), "fmax s19, s20, s21");
+ COMPARE(fmax(d22, d23, d24), "fmax d22, d23, d24");
+ COMPARE(fmin(s25, s26, s27), "fmin s25, s26, s27");
+ COMPARE(fmin(d28, d29, d30), "fmin d28, d29, d30");
+ COMPARE(fmaxnm(s31, s0, s1), "fmaxnm s31, s0, s1");
+ COMPARE(fmaxnm(d2, d3, d4), "fmaxnm d2, d3, d4");
+ COMPARE(fminnm(s5, s6, s7), "fminnm s5, s6, s7");
+ COMPARE(fminnm(d8, d9, d10), "fminnm d8, d9, d10");
+
+ CLEANUP();
+}
+
+
+TEST(fp_dp3) {
+ SET_UP();
+
+ COMPARE(fmadd(s7, s8, s9, s10), "fmadd s7, s8, s9, s10");
+ COMPARE(fmadd(d10, d11, d12, d10), "fmadd d10, d11, d12, d10");
+ COMPARE(fmsub(s7, s8, s9, s10), "fmsub s7, s8, s9, s10");
+ COMPARE(fmsub(d10, d11, d12, d10), "fmsub d10, d11, d12, d10");
+
+ COMPARE(fnmadd(s7, s8, s9, s10), "fnmadd s7, s8, s9, s10");
+ COMPARE(fnmadd(d10, d11, d12, d10), "fnmadd d10, d11, d12, d10");
+ COMPARE(fnmsub(s7, s8, s9, s10), "fnmsub s7, s8, s9, s10");
+ COMPARE(fnmsub(d10, d11, d12, d10), "fnmsub d10, d11, d12, d10");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_compare) {
+ SET_UP();
+
+ COMPARE(fcmp(s0, s1), "fcmp s0, s1");
+ COMPARE(fcmp(s31, s30), "fcmp s31, s30");
+ COMPARE(fcmp(d0, d1), "fcmp d0, d1");
+ COMPARE(fcmp(d31, d30), "fcmp d31, d30");
+ COMPARE(fcmp(s12, 0), "fcmp s12, #0.0");
+ COMPARE(fcmp(d12, 0), "fcmp d12, #0.0");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_cond_compare) {
+ SET_UP();
+
+ COMPARE(fccmp(s0, s1, NoFlag, eq), "fccmp s0, s1, #nzcv, eq");
+ COMPARE(fccmp(s2, s3, ZVFlag, ne), "fccmp s2, s3, #nZcV, ne");
+ COMPARE(fccmp(s30, s16, NCFlag, pl), "fccmp s30, s16, #NzCv, pl");
+ COMPARE(fccmp(s31, s31, NZCVFlag, le), "fccmp s31, s31, #NZCV, le");
+ COMPARE(fccmp(d4, d5, VFlag, gt), "fccmp d4, d5, #nzcV, gt");
+ COMPARE(fccmp(d6, d7, NFlag, vs), "fccmp d6, d7, #Nzcv, vs");
+ COMPARE(fccmp(d30, d0, NZFlag, vc), "fccmp d30, d0, #NZcv, vc");
+ COMPARE(fccmp(d31, d31, ZFlag, hs), "fccmp d31, d31, #nZcv, hs");
+ COMPARE(fccmp(s14, s15, CVFlag, al), "fccmp s14, s15, #nzCV, al");
+ COMPARE(fccmp(d16, d17, CFlag, nv), "fccmp d16, d17, #nzCv, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_select) {
+ SET_UP();
+
+ COMPARE(fcsel(s0, s1, s2, eq), "fcsel s0, s1, s2, eq")
+ COMPARE(fcsel(s31, s31, s30, ne), "fcsel s31, s31, s30, ne");
+ COMPARE(fcsel(d0, d1, d2, mi), "fcsel d0, d1, d2, mi");
+ COMPARE(fcsel(d31, d30, d31, pl), "fcsel d31, d30, d31, pl");
+ COMPARE(fcsel(s14, s15, s16, al), "fcsel s14, s15, s16, al");
+ COMPARE(fcsel(d17, d18, d19, nv), "fcsel d17, d18, d19, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fcvt_scvtf_ucvtf) {
+ SET_UP();
+
+ COMPARE(fcvtas(w0, s1), "fcvtas w0, s1");
+ COMPARE(fcvtas(x2, s3), "fcvtas x2, s3");
+ COMPARE(fcvtas(w4, d5), "fcvtas w4, d5");
+ COMPARE(fcvtas(x6, d7), "fcvtas x6, d7");
+ COMPARE(fcvtau(w8, s9), "fcvtau w8, s9");
+ COMPARE(fcvtau(x10, s11), "fcvtau x10, s11");
+ COMPARE(fcvtau(w12, d13), "fcvtau w12, d13");
+ COMPARE(fcvtau(x14, d15), "fcvtau x14, d15");
+ COMPARE(fcvtns(w0, s1), "fcvtns w0, s1");
+ COMPARE(fcvtns(x2, s3), "fcvtns x2, s3");
+ COMPARE(fcvtns(w4, d5), "fcvtns w4, d5");
+ COMPARE(fcvtns(x6, d7), "fcvtns x6, d7");
+ COMPARE(fcvtnu(w8, s9), "fcvtnu w8, s9");
+ COMPARE(fcvtnu(x10, s11), "fcvtnu x10, s11");
+ COMPARE(fcvtnu(w12, d13), "fcvtnu w12, d13");
+ COMPARE(fcvtnu(x14, d15), "fcvtnu x14, d15");
+ COMPARE(fcvtzu(x16, d17), "fcvtzu x16, d17");
+ COMPARE(fcvtzu(w18, d19), "fcvtzu w18, d19");
+ COMPARE(fcvtzs(x20, d21), "fcvtzs x20, d21");
+ COMPARE(fcvtzs(w22, d23), "fcvtzs w22, d23");
+ COMPARE(fcvtzu(x16, s17), "fcvtzu x16, s17");
+ COMPARE(fcvtzu(w18, s19), "fcvtzu w18, s19");
+ COMPARE(fcvtzs(x20, s21), "fcvtzs x20, s21");
+ COMPARE(fcvtzs(w22, s23), "fcvtzs w22, s23");
+ COMPARE(scvtf(d24, w25), "scvtf d24, w25");
+ COMPARE(scvtf(s24, w25), "scvtf s24, w25");
+ COMPARE(scvtf(d26, x0), "scvtf d26, x0");
+ COMPARE(scvtf(s26, x0), "scvtf s26, x0");
+ COMPARE(ucvtf(d28, w29), "ucvtf d28, w29");
+ COMPARE(ucvtf(s28, w29), "ucvtf s28, w29");
+ COMPARE(ucvtf(d0, x1), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1), "ucvtf s0, x1");
+ COMPARE(ucvtf(d0, x1, 0), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1, 0), "ucvtf s0, x1");
+ COMPARE(scvtf(d1, x2, 1), "scvtf d1, x2, #1");
+ COMPARE(scvtf(s1, x2, 1), "scvtf s1, x2, #1");
+ COMPARE(scvtf(d3, x4, 15), "scvtf d3, x4, #15");
+ COMPARE(scvtf(s3, x4, 15), "scvtf s3, x4, #15");
+ COMPARE(scvtf(d5, x6, 32), "scvtf d5, x6, #32");
+ COMPARE(scvtf(s5, x6, 32), "scvtf s5, x6, #32");
+ COMPARE(ucvtf(d7, x8, 2), "ucvtf d7, x8, #2");
+ COMPARE(ucvtf(s7, x8, 2), "ucvtf s7, x8, #2");
+ COMPARE(ucvtf(d9, x10, 16), "ucvtf d9, x10, #16");
+ COMPARE(ucvtf(s9, x10, 16), "ucvtf s9, x10, #16");
+ COMPARE(ucvtf(d11, x12, 33), "ucvtf d11, x12, #33");
+ COMPARE(ucvtf(s11, x12, 33), "ucvtf s11, x12, #33");
+ COMPARE(fcvtms(w0, s1), "fcvtms w0, s1");
+ COMPARE(fcvtms(x2, s3), "fcvtms x2, s3");
+ COMPARE(fcvtms(w4, d5), "fcvtms w4, d5");
+ COMPARE(fcvtms(x6, d7), "fcvtms x6, d7");
+ COMPARE(fcvtmu(w8, s9), "fcvtmu w8, s9");
+ COMPARE(fcvtmu(x10, s11), "fcvtmu x10, s11");
+ COMPARE(fcvtmu(w12, d13), "fcvtmu w12, d13");
+ COMPARE(fcvtmu(x14, d15), "fcvtmu x14, d15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_mrs) {
+ SET_UP();
+
+ COMPARE(mrs(x0, NZCV), "mrs x0, nzcv");
+ COMPARE(mrs(lr, NZCV), "mrs lr, nzcv");
+ COMPARE(mrs(x15, FPCR), "mrs x15, fpcr");
+
+ CLEANUP();
+}
+
+
+TEST_(system_msr) {
+ SET_UP();
+
+ COMPARE(msr(NZCV, x0), "msr nzcv, x0");
+ COMPARE(msr(NZCV, x30), "msr nzcv, lr");
+ COMPARE(msr(FPCR, x15), "msr fpcr, x15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_nop) {
+ SET_UP();
+
+ COMPARE(nop(), "nop");
+
+ CLEANUP();
+}
+
+
+TEST_(debug) {
+ SET_UP();
+
+ ASSERT(kImmExceptionIsDebug == 0xdeb0);
+
+ // All debug codes should produce the same instruction, and the debug code
+ // can be any uint32_t.
+ COMPARE(debug("message", 0, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 1, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x10000, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x7fffffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x80000000u, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffffffffu, NO_PARAM), "hlt #0xdeb0");
+
+ CLEANUP();
+}
+
+
+TEST_(hlt) {
+ SET_UP();
+
+ COMPARE(hlt(0), "hlt #0x0");
+ COMPARE(hlt(1), "hlt #0x1");
+ COMPARE(hlt(65535), "hlt #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(brk) {
+ SET_UP();
+
+ COMPARE(brk(0), "brk #0x0");
+ COMPARE(brk(1), "brk #0x1");
+ COMPARE(brk(65535), "brk #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(add_sub_negative) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
+ COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
+ COMPARE(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
+
+ COMPARE(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
+ COMPARE(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
+ COMPARE(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
+
+ COMPARE(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
+ COMPARE(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
+
+ COMPARE(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
+ COMPARE(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
+
+ COMPARE(Cmp(w0, -1), "cmn w0, #0x1 (1)");
+ COMPARE(Cmp(x1, -1), "cmn x1, #0x1 (1)");
+ COMPARE(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
+ COMPARE(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
+
+ COMPARE(Cmn(w0, -1), "cmp w0, #0x1 (1)");
+ COMPARE(Cmn(x1, -1), "cmp x1, #0x1 (1)");
+ COMPARE(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
+ COMPARE(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_immediate_move) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(And(w0, w1, 0), "movz w0, #0x0");
+ COMPARE(And(x0, x1, 0), "movz x0, #0x0");
+ COMPARE(Orr(w2, w3, 0), "mov w2, w3");
+ COMPARE(Orr(x2, x3, 0), "mov x2, x3");
+ COMPARE(Eor(w4, w5, 0), "mov w4, w5");
+ COMPARE(Eor(x4, x5, 0), "mov x4, x5");
+ COMPARE(Bic(w6, w7, 0), "mov w6, w7");
+ COMPARE(Bic(x6, x7, 0), "mov x6, x7");
+ COMPARE(Orn(w8, w9, 0), "movn w8, #0x0");
+ COMPARE(Orn(x8, x9, 0), "movn x8, #0x0");
+ COMPARE(Eon(w10, w11, 0), "mvn w10, w11");
+ COMPARE(Eon(x10, x11, 0), "mvn x10, x11");
+
+ COMPARE(And(w12, w13, 0xffffffff), "mov w12, w13");
+ COMPARE(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
+ COMPARE(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
+ COMPARE(Orr(w14, w15, 0xffffffff), "movn w14, #0x0");
+ COMPARE(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
+ COMPARE(Orr(x14, x15, 0xffffffffffffffff), "movn x14, #0x0");
+ COMPARE(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
+ COMPARE(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
+ COMPARE(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
+ COMPARE(Bic(w18, w19, 0xffffffff), "movz w18, #0x0");
+ COMPARE(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
+ COMPARE(Bic(x18, x19, 0xffffffffffffffff), "movz x18, #0x0");
+ COMPARE(Orn(w20, w21, 0xffffffff), "mov w20, w21");
+ COMPARE(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
+ COMPARE(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
+ COMPARE(Eon(w22, w23, 0xffffffff), "mov w22, w23");
+ COMPARE(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
+ COMPARE(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
+
+ CLEANUP();
+}
+
+
+TEST_(barriers) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // DMB
+ COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
+ COMPARE(Dmb(FullSystem, BarrierReads), "dmb ld");
+ COMPARE(Dmb(FullSystem, BarrierWrites), "dmb st");
+
+ COMPARE(Dmb(InnerShareable, BarrierAll), "dmb ish");
+ COMPARE(Dmb(InnerShareable, BarrierReads), "dmb ishld");
+ COMPARE(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
+
+ COMPARE(Dmb(NonShareable, BarrierAll), "dmb nsh");
+ COMPARE(Dmb(NonShareable, BarrierReads), "dmb nshld");
+ COMPARE(Dmb(NonShareable, BarrierWrites), "dmb nshst");
+
+ COMPARE(Dmb(OuterShareable, BarrierAll), "dmb osh");
+ COMPARE(Dmb(OuterShareable, BarrierReads), "dmb oshld");
+ COMPARE(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
+
+ COMPARE(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
+ COMPARE(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
+ COMPARE(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
+ COMPARE(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
+
+ // DSB
+ COMPARE(Dsb(FullSystem, BarrierAll), "dsb sy");
+ COMPARE(Dsb(FullSystem, BarrierReads), "dsb ld");
+ COMPARE(Dsb(FullSystem, BarrierWrites), "dsb st");
+
+ COMPARE(Dsb(InnerShareable, BarrierAll), "dsb ish");
+ COMPARE(Dsb(InnerShareable, BarrierReads), "dsb ishld");
+ COMPARE(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
+
+ COMPARE(Dsb(NonShareable, BarrierAll), "dsb nsh");
+ COMPARE(Dsb(NonShareable, BarrierReads), "dsb nshld");
+ COMPARE(Dsb(NonShareable, BarrierWrites), "dsb nshst");
+
+ COMPARE(Dsb(OuterShareable, BarrierAll), "dsb osh");
+ COMPARE(Dsb(OuterShareable, BarrierReads), "dsb oshld");
+ COMPARE(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
+
+ COMPARE(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
+ COMPARE(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
+ COMPARE(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
+ COMPARE(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
+
+ // ISB
+ COMPARE(Isb(), "isb");
+
+ CLEANUP();
+}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index f32a69c4a..7ca95f6c9 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
#include "v8.h"
-#include "stub-cache.h"
#include "debug.h"
#include "disasm.h"
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
+#include "stub-cache.h"
#include "cctest.h"
using namespace v8::internal;
@@ -49,7 +49,7 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmIa320) {
CcTest::InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -74,12 +74,23 @@ TEST(DisasmIa320) {
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(edx, Operand(ebx, 1999));
+ __ add(edx, Operand(ebx, -4));
+ __ add(edx, Operand(ebx, -1999));
__ add(edx, Operand(esp, 0));
__ add(edx, Operand(esp, 16));
__ add(edx, Operand(esp, 1999));
+ __ add(edx, Operand(esp, -4));
+ __ add(edx, Operand(esp, -1999));
+ __ nop();
+ __ add(esi, Operand(ecx, times_4, 0));
+ __ add(esi, Operand(ecx, times_4, 24));
+ __ add(esi, Operand(ecx, times_4, -4));
+ __ add(esi, Operand(ecx, times_4, -1999));
__ nop();
__ add(edi, Operand(ebp, ecx, times_4, 0));
__ add(edi, Operand(ebp, ecx, times_4, 12));
+ __ add(edi, Operand(ebp, ecx, times_4, -8));
+ __ add(edi, Operand(ebp, ecx, times_4, -3999));
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
__ nop();
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 9fce25fae..5ca12b943 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -34,6 +34,7 @@
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
+#include "stub-cache.h"
#include "cctest.h"
using namespace v8::internal;
@@ -48,17 +49,18 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmX64) {
CcTest::InitializeVM();
- v8::HandleScope scope;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- Assembler assm(CcTest::i_isolate(), buffer, sizeof buffer);
+ Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
// Short immediate instructions
__ addq(rax, Immediate(12345678));
- __ or_(rax, Immediate(12345678));
+ __ orq(rax, Immediate(12345678));
__ subq(rax, Immediate(12345678));
- __ xor_(rax, Immediate(12345678));
- __ and_(rax, Immediate(12345678));
+ __ xorq(rax, Immediate(12345678));
+ __ andq(rax, Immediate(12345678));
// ---- This one caused crash
__ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
@@ -68,27 +70,38 @@ TEST(DisasmX64) {
__ addq(rdx, Operand(rbx, 0));
__ addq(rdx, Operand(rbx, 16));
__ addq(rdx, Operand(rbx, 1999));
+ __ addq(rdx, Operand(rbx, -4));
+ __ addq(rdx, Operand(rbx, -1999));
__ addq(rdx, Operand(rsp, 0));
__ addq(rdx, Operand(rsp, 16));
__ addq(rdx, Operand(rsp, 1999));
+ __ addq(rdx, Operand(rsp, -4));
+ __ addq(rdx, Operand(rsp, -1999));
+ __ nop();
+ __ addq(rsi, Operand(rcx, times_4, 0));
+ __ addq(rsi, Operand(rcx, times_4, 24));
+ __ addq(rsi, Operand(rcx, times_4, -4));
+ __ addq(rsi, Operand(rcx, times_4, -1999));
__ nop();
__ addq(rdi, Operand(rbp, rcx, times_4, 0));
__ addq(rdi, Operand(rbp, rcx, times_4, 12));
+ __ addq(rdi, Operand(rbp, rcx, times_4, -8));
+ __ addq(rdi, Operand(rbp, rcx, times_4, -3999));
__ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
__ nop();
__ addq(rbx, Immediate(12));
__ nop();
__ nop();
- __ and_(rdx, Immediate(3));
- __ and_(rdx, Operand(rsp, 4));
+ __ andq(rdx, Immediate(3));
+ __ andq(rdx, Operand(rsp, 4));
__ cmpq(rdx, Immediate(3));
__ cmpq(rdx, Operand(rsp, 4));
__ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
__ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
__ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
- __ or_(rdx, Immediate(3));
- __ xor_(rdx, Immediate(3));
+ __ orq(rdx, Immediate(3));
+ __ xorq(rdx, Immediate(3));
__ nop();
__ cpuid();
__ movsxbq(rdx, Operand(rcx, 0));
@@ -99,23 +112,23 @@ TEST(DisasmX64) {
__ movzxwq(rdx, Operand(rcx, 0));
__ nop();
- __ imul(rdx, rcx);
+ __ imulq(rdx, rcx);
__ shld(rdx, rcx);
__ shrd(rdx, rcx);
__ bts(Operand(rdx, 0), rcx);
__ bts(Operand(rbx, rcx, times_4, 0), rcx);
__ nop();
- __ push(Immediate(12));
- __ push(Immediate(23456));
- __ push(rcx);
- __ push(rsi);
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Operand(rbx, rcx, times_4, 0));
- __ push(Operand(rbx, rcx, times_4, 0));
- __ push(Operand(rbx, rcx, times_4, 10000));
- __ pop(rdx);
- __ pop(rax);
- __ pop(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Immediate(12));
+ __ pushq(Immediate(23456));
+ __ pushq(rcx);
+ __ pushq(rsi);
+ __ pushq(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ pushq(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Operand(rbx, rcx, times_4, 10000));
+ __ popq(rdx);
+ __ popq(rax);
+ __ popq(Operand(rbx, rcx, times_4, 0));
__ nop();
__ addq(rdx, Operand(rsp, 16));
@@ -145,23 +158,24 @@ TEST(DisasmX64) {
__ nop();
__ idivq(rdx);
__ mul(rdx);
- __ neg(rdx);
- __ not_(rdx);
+ __ negq(rdx);
+ __ notq(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
- __ imul(rdx, Operand(rbx, rcx, times_4, 10000));
- __ imul(rdx, rcx, Immediate(12));
- __ imul(rdx, rcx, Immediate(1000));
+ __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ imulq(rdx, rcx, Immediate(12));
+ __ imulq(rdx, rcx, Immediate(1000));
__ incq(rdx);
__ incq(Operand(rbx, rcx, times_4, 10000));
- __ push(Operand(rbx, rcx, times_4, 10000));
- __ pop(Operand(rbx, rcx, times_4, 10000));
- __ jmp(Operand(rbx, rcx, times_4, 10000));
+ __ pushq(Operand(rbx, rcx, times_4, 10000));
+ __ popq(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ jmp(Operand(rbx, rcx, times_4, 10000));
- __ lea(rdx, Operand(rbx, rcx, times_4, 10000));
- __ or_(rdx, Immediate(12345));
- __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ orq(rdx, Immediate(12345));
+ __ orq(rdx, Operand(rbx, rcx, times_4, 10000));
__ nop();
@@ -188,22 +202,22 @@ TEST(DisasmX64) {
__ addq(rbx, Immediate(12));
__ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ and_(rbx, Immediate(12345));
+ __ andq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12));
__ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ cmpb(rax, Immediate(100));
- __ or_(rbx, Immediate(12345));
+ __ orq(rbx, Immediate(12345));
__ subq(rbx, Immediate(12));
__ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ xor_(rbx, Immediate(12345));
+ __ xorq(rbx, Immediate(12345));
- __ imul(rdx, rcx, Immediate(12));
- __ imul(rdx, rcx, Immediate(1000));
+ __ imulq(rdx, rcx, Immediate(12));
+ __ imulq(rdx, rcx, Immediate(1000));
__ cld();
@@ -216,8 +230,8 @@ TEST(DisasmX64) {
__ testb(Operand(rax, -20), Immediate(0x9A));
__ nop();
- __ xor_(rdx, Immediate(12345));
- __ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
+ __ xorq(rdx, Immediate(12345));
+ __ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
__ bts(Operand(rbx, rcx, times_8, 10000), rdx);
__ hlt();
__ int3();
@@ -233,20 +247,20 @@ TEST(DisasmX64) {
__ call(&L2);
__ nop();
__ bind(&L2);
- __ call(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ call(Operand(rbx, rcx, times_4, 10000));
__ nop();
- Handle<Code> ic(CcTest::i_isolate()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ nop();
__ jmp(&L1);
- __ jmp(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ jmp(Operand(rbx, rcx, times_4, 10000));
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(),
- assm.isolate());
+ ExternalReference(Debug_Address::AfterBreakTarget(), isolate);
USE(after_break_target);
#endif // ENABLE_DEBUGGER_SUPPORT
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -345,9 +359,9 @@ TEST(DisasmX64) {
__ andps(xmm0, xmm1);
__ andps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ orps(xmm0, xmm1);
- __ ordps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ orps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ xorps(xmm0, xmm1);
- __ xordps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ xorps(xmm0, Operand(rbx, rcx, times_4, 10000));
// Arithmetic operation
__ addps(xmm1, xmm0);
@@ -355,7 +369,7 @@ TEST(DisasmX64) {
__ subps(xmm1, xmm0);
__ subps(xmm1, Operand(rbx, rcx, times_4, 10000));
__ mulps(xmm1, xmm0);
- __ mulps(xmm1, Operand(rbx, ecx, times_4, 10000));
+ __ mulps(xmm1, Operand(rbx, rcx, times_4, 10000));
__ divps(xmm1, xmm0);
__ divps(xmm1, Operand(rbx, rcx, times_4, 10000));
}
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
new file mode 100644
index 000000000..0ceb60f7b
--- /dev/null
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include "cctest.h"
+
+#include "arm64/decoder-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+
+using namespace v8::internal;
+
+TEST(FUZZ_decoder) {
+ // Feed noise into the decoder to check that it doesn't crash.
+ // 43 million = ~1% of the instruction space.
+ static const int instruction_count = 43 * 1024 * 1024;
+
+ uint16_t seed[3] = {1, 2, 3};
+ seed48(seed);
+
+ Decoder<DispatchingDecoderVisitor> decoder;
+ Instruction buffer[kInstructionSize];
+
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
+
+
+TEST(FUZZ_disasm) {
+ // Feed noise into the disassembler to check that it doesn't crash.
+ // 9 million = ~0.2% of the instruction space.
+ static const int instruction_count = 9 * 1024 * 1024;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ Decoder<DispatchingDecoderVisitor> decoder;
+ Disassembler disasm;
+ Instruction buffer[kInstructionSize];
+
+ decoder.AppendVisitor(&disasm);
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 3ec844e9c..66ee04158 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -66,21 +66,21 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ pop(ebx);
__ Ret();
#elif V8_TARGET_ARCH_X64
- __ push(kRootRegister);
+ __ pushq(kRootRegister);
__ InitializeRootRegister();
- __ push(rbx);
- __ push(rcx);
- __ movq(rax, Immediate(0));
- __ movq(rbx, Immediate(string.at(0)));
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ movp(rax, Immediate(0));
+ __ movp(rbx, Immediate(string.at(0)));
StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
for (int i = 1; i < string.length(); i++) {
- __ movq(rbx, Immediate(string.at(i)));
+ __ movp(rbx, Immediate(string.at(i)));
StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
}
StringHelper::GenerateHashGetHash(masm, rax, rcx);
- __ pop(rcx);
- __ pop(rbx);
- __ pop(kRootRegister);
+ __ popq(rcx);
+ __ popq(rbx);
+ __ popq(kRootRegister);
__ Ret();
#elif V8_TARGET_ARCH_ARM
__ push(kRootRegister);
@@ -96,6 +96,24 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, 0);
+ __ Mov(x10, Operand(string.at(0)));
+ StringHelper::GenerateHashInit(masm, x0, x10);
+ for (int i = 1; i < string.length(); i++) {
+ __ Mov(x10, Operand(string.at(i)));
+ StringHelper::GenerateHashAddCharacter(masm, x0, x10);
+ }
+ StringHelper::GenerateHashGetHash(masm, x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -111,6 +129,8 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
@@ -123,13 +143,13 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(ebx);
__ Ret();
#elif V8_TARGET_ARCH_X64
- __ push(kRootRegister);
+ __ pushq(kRootRegister);
__ InitializeRootRegister();
- __ push(rbx);
- __ movq(rax, Immediate(key));
+ __ pushq(rbx);
+ __ movp(rax, Immediate(key));
__ GetNumberHash(rax, rbx);
- __ pop(rbx);
- __ pop(kRootRegister);
+ __ popq(rbx);
+ __ popq(kRootRegister);
__ Ret();
#elif V8_TARGET_ARCH_ARM
__ push(kRootRegister);
@@ -138,6 +158,18 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, key);
+ __ GetNumberHash(x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -146,6 +178,8 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
@@ -172,8 +206,8 @@ void check(i::Vector<const uint8_t> string) {
Handle<String> v8_string = factory->NewStringFromOneByte(string);
v8_string->set_hash_field(String::kEmptyHashField);
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
@@ -207,8 +241,8 @@ void check(uint32_t key) {
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 1caa515a9..f1ccc571d 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -234,9 +234,9 @@ TEST(HeapSnapshotObjectSizes) {
CHECK_NE(NULL, x2);
// Test sizes.
- CHECK_NE(0, x->GetSelfSize());
- CHECK_NE(0, x1->GetSelfSize());
- CHECK_NE(0, x2->GetSelfSize());
+ CHECK_NE(0, static_cast<int>(x->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x1->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x2->GetShallowSize()));
}
@@ -2067,7 +2067,8 @@ TEST(AllocationSitesAreVisible) {
"elements");
CHECK_NE(NULL, elements);
CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
- CHECK_EQ(v8::internal::FixedArray::SizeFor(3), elements->GetSelfSize());
+ CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
+ static_cast<int>(elements->GetShallowSize()));
v8::Handle<v8::Value> array_val =
heap_profiler->FindObjectById(transition_info->GetId());
@@ -2215,8 +2216,9 @@ static AllocationTraceNode* FindNode(
Vector<AllocationTraceNode*> children = node->children();
node = NULL;
for (int j = 0; j < children.length(); j++) {
- v8::SnapshotObjectId id = children[j]->function_id();
- AllocationTracker::FunctionInfo* info = tracker->GetFunctionInfo(id);
+ unsigned index = children[j]->function_info_index();
+ AllocationTracker::FunctionInfo* info =
+ tracker->function_info_list()[index];
if (info && strcmp(info->name, name) == 0) {
node = children[j];
break;
@@ -2363,6 +2365,34 @@ TEST(TrackBumpPointerAllocations) {
}
+TEST(TrackV8ApiAllocation) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ const char* names[] = { "(V8 API)" };
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ v8::Handle<v8::Object> o1 = v8::Object::New(env->GetIsolate());
+ o1->Clone();
+
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_GE(node->allocation_count(), 2);
+ CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+ heap_profiler->StopTrackingHeapObjects();
+}
+
+
TEST(ArrayBufferAndArrayBufferView) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -2381,6 +2411,71 @@ TEST(ArrayBufferAndArrayBufferView) {
const v8::HeapGraphNode* first_view =
GetProperty(arr1_buffer, v8::HeapGraphEdge::kWeak, "weak_first_view");
CHECK_NE(NULL, first_view);
+ const v8::HeapGraphNode* backing_store =
+ GetProperty(arr1_buffer, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, backing_store);
+ CHECK_EQ(400, static_cast<int>(backing_store->GetShallowSize()));
+}
+
+
+static int GetRetainersCount(const v8::HeapSnapshot* snapshot,
+ const v8::HeapGraphNode* node) {
+ int count = 0;
+ for (int i = 0, l = snapshot->GetNodesCount(); i < l; ++i) {
+ const v8::HeapGraphNode* parent = snapshot->GetNode(i);
+ for (int j = 0, l2 = parent->GetChildrenCount(); j < l2; ++j) {
+ if (parent->GetChild(j)->GetToNode() == node) {
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+
+TEST(ArrayBufferSharedBackingStore) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
+ v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+ v8::ArrayBuffer::Contents ab_contents = ab->Externalize();
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ void* data = ab_contents.Data();
+ ASSERT(data != NULL);
+ v8::Local<v8::ArrayBuffer> ab2 =
+ v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
+ CHECK(ab2->IsExternal());
+ env->Global()->Set(v8_str("ab1"), ab);
+ env->Global()->Set(v8_str("ab2"), ab2);
+
+ v8::Handle<v8::Value> result = CompileRun("ab2.byteLength");
+ CHECK_EQ(1024, result->Int32Value());
+
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* ab1_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab1");
+ CHECK_NE(NULL, ab1_node);
+ const v8::HeapGraphNode* ab1_data =
+ GetProperty(ab1_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab1_data);
+ const v8::HeapGraphNode* ab2_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab2");
+ CHECK_NE(NULL, ab2_node);
+ const v8::HeapGraphNode* ab2_data =
+ GetProperty(ab2_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab2_data);
+ CHECK_EQ(ab1_data, ab2_data);
+ CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
+ free(data);
}
@@ -2411,3 +2506,63 @@ TEST(BoxObject) {
GetProperty(box_node, v8::HeapGraphEdge::kInternal, "value");
CHECK_NE(NULL, box_value);
}
+
+
+static inline i::Address ToAddress(int n) {
+ return reinterpret_cast<i::Address>(n);
+}
+
+
+TEST(AddressToTraceMap) {
+ i::AddressToTraceMap map;
+
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(150)));
+
+ // [0x100, 0x200) -> 1
+ map.AddRange(ToAddress(0x100), 0x100, 1U);
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x50)));
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x100)));
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x150)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x100 + 0x100)));
+ CHECK_EQ(1, static_cast<int>(map.size()));
+
+ // [0x100, 0x200) -> 1, [0x200, 0x300) -> 2
+ map.AddRange(ToAddress(0x200), 0x100, 2U);
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x2a0)));
+ CHECK_EQ(2, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2
+ map.AddRange(ToAddress(0x180), 0x100, 3U);
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2,
+ // [0x400, 0x500) -> 4
+ map.AddRange(ToAddress(0x400), 0x100, 4U);
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(4, map.GetTraceNodeId(ToAddress(0x450)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x500)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x350)));
+ CHECK_EQ(4, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x200) -> 3, [0x200, 0x600) -> 5
+ map.AddRange(ToAddress(0x200), 0x400, 5U);
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x400)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x200) -> 7, [0x200, 0x600) ->5
+ map.AddRange(ToAddress(0x180), 0x80, 6U);
+ map.AddRange(ToAddress(0x180), 0x80, 7U);
+ CHECK_EQ(7, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+ map.Clear();
+ CHECK_EQ(0, static_cast<int>(map.size()));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x400)));
+}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 3e8d93b3a..c1f20f1f0 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -148,6 +148,16 @@ static void CheckFindCodeObject(Isolate* isolate) {
}
+TEST(HandleNull) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ LocalContext context;
+ Handle<Object> n(reinterpret_cast<Object*>(NULL), isolate);
+ CHECK(!n.is_null());
+}
+
+
TEST(HeapObjects) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -180,7 +190,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
-#ifndef V8_TARGET_ARCH_X64
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
@@ -275,11 +285,11 @@ TEST(GarbageCollection) {
Handle<Map> initial_map =
factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- JSReceiver::SetProperty(global, name, function, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, name, function, NONE, SLOPPY);
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, prop_namex, twenty_four, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, prop_namex, twenty_four, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
@@ -299,8 +309,8 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(global, obj_name, obj, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, obj_name, obj, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
}
// After gc, it should survive.
@@ -433,7 +443,7 @@ TEST(WeakGlobalHandlesScavenge) {
&TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -518,7 +528,7 @@ TEST(DeleteWeakGlobalHandle) {
&TestWeakGlobalHandleCallback);
// Scanvenge does not recognize weak reference.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK(!WeakPointerCleared);
@@ -635,11 +645,10 @@ TEST(FunctionAllocation) {
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check that we can add properties to function objects.
- JSReceiver::SetProperty(function, prop_name, twenty_four, NONE,
- kNonStrictMode);
+ JSReceiver::SetProperty(function, prop_name, twenty_four, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(24), function->GetProperty(*prop_name));
}
@@ -666,7 +675,7 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
// delete first
@@ -674,8 +683,8 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
CHECK(JSReceiver::HasLocalProperty(obj, second));
@@ -687,8 +696,8 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, second));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
CHECK(JSReceiver::HasLocalProperty(obj, second));
@@ -702,14 +711,14 @@ TEST(ObjectProperties) {
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAscii(CStrVector(string1));
- JSReceiver::SetProperty(obj, s1, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, s1, one, NONE, SLOPPY);
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(JSReceiver::HasLocalProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- JSReceiver::SetProperty(obj, s2_string, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, s2_string, one, NONE, SLOPPY);
Handle<String> s2 = factory->NewStringFromAscii(CStrVector(string2));
CHECK(JSReceiver::HasLocalProperty(obj, s2));
}
@@ -733,7 +742,7 @@ TEST(JSObjectMaps) {
// Set a propery
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check the map has changed
@@ -757,23 +766,23 @@ TEST(JSArray) {
Handle<JSObject> object = factory->NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
// We just initialized the VM, no heap allocation failure yet.
- array->Initialize(0)->ToObjectChecked();
+ JSArray::Initialize(array, 0);
// Set array length to 0.
- array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
+ *JSArray::SetElementsLength(array, handle(Smi::FromInt(0), isolate));
CHECK_EQ(Smi::FromInt(0), array->length());
// Must be in fast mode.
CHECK(array->HasFastSmiOrObjectElements());
// array[length] = name.
- JSReceiver::SetElement(array, 0, name, NONE, kNonStrictMode);
+ JSReceiver::SetElement(array, 0, name, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(1), array->length());
- CHECK_EQ(array->GetElement(isolate, 0), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, 0), *name);
// Set array length with larger than smi value.
Handle<Object> length =
factory->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
- array->SetElementsLength(*length)->ToObjectChecked();
+ *JSArray::SetElementsLength(array, length);
uint32_t int_length = 0;
CHECK(length->ToArrayIndex(&int_length));
@@ -781,12 +790,12 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- JSReceiver::SetElement(array, int_length, name, NONE, kNonStrictMode);
+ JSReceiver::SetElement(array, int_length, name, NONE, SLOPPY);
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
- CHECK_EQ(array->GetElement(isolate, int_length), *name);
- CHECK_EQ(array->GetElement(isolate, 0), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, int_length), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, 0), *name);
}
@@ -808,31 +817,35 @@ TEST(JSObjectCopy) {
Handle<Smi> one(Smi::FromInt(1), isolate);
Handle<Smi> two(Smi::FromInt(2), isolate);
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
- JSReceiver::SetElement(obj, 0, first, NONE, kNonStrictMode);
- JSReceiver::SetElement(obj, 1, second, NONE, kNonStrictMode);
+ JSReceiver::SetElement(obj, 0, first, NONE, SLOPPY);
+ JSReceiver::SetElement(obj, 1, second, NONE, SLOPPY);
// Make the clone.
Handle<JSObject> clone = JSObject::Copy(obj);
CHECK(!clone.is_identical_to(obj));
- CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 0));
- CHECK_EQ(obj->GetElement(isolate, 1), clone->GetElement(isolate, 1));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 0),
+ *i::Object::GetElement(isolate, clone, 0));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 1),
+ *i::Object::GetElement(isolate, clone, 1));
CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*first));
CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*second));
// Flip the values.
- JSReceiver::SetProperty(clone, first, two, NONE, kNonStrictMode);
- JSReceiver::SetProperty(clone, second, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(clone, first, two, NONE, SLOPPY);
+ JSReceiver::SetProperty(clone, second, one, NONE, SLOPPY);
- JSReceiver::SetElement(clone, 0, second, NONE, kNonStrictMode);
- JSReceiver::SetElement(clone, 1, first, NONE, kNonStrictMode);
+ JSReceiver::SetElement(clone, 0, second, NONE, SLOPPY);
+ JSReceiver::SetElement(clone, 1, first, NONE, SLOPPY);
- CHECK_EQ(obj->GetElement(isolate, 1), clone->GetElement(isolate, 0));
- CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 1));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 1),
+ *i::Object::GetElement(isolate, clone, 0));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 0),
+ *i::Object::GetElement(isolate, clone, 1));
CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*first));
CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*second));
@@ -1022,7 +1035,7 @@ TEST(Regression39128) {
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
Address old_pointer_space_top = heap->old_pointer_space()->top();
- AlwaysAllocateScope aa_scope;
+ AlwaysAllocateScope aa_scope(isolate);
Object* clone_obj = heap->CopyJSObject(jsobject)->ToObjectChecked();
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_pointer_space_top) {
@@ -1436,7 +1449,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
}
@@ -1448,14 +1461,14 @@ TEST(TestInternalWeakLists) {
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
@@ -1477,7 +1490,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
@@ -1596,7 +1609,7 @@ TEST(TestSizeOfObjects) {
{
// Allocate objects on several different old-space pages so that
// lazy sweeping kicks in for subsequent GC runs.
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
CcTest::heap()->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
@@ -1663,7 +1676,7 @@ static void FillUpNewSpace(NewSpace* new_space) {
Isolate* isolate = heap->isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
@@ -2004,8 +2017,14 @@ TEST(PrototypeTransitionClearing) {
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
+ CompileRun("var base = {};");
+ Handle<JSObject> baseObject =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Object>::Cast(
+ CcTest::global()->Get(v8_str("base"))));
+ int initialTransitions = baseObject->map()->NumberOfProtoTransitions();
+
CompileRun(
- "var base = {};"
"var live = [];"
"for (var i = 0; i < 10; i++) {"
" var object = {};"
@@ -2014,32 +2033,29 @@ TEST(PrototypeTransitionClearing) {
" if (i >= 3) live.push(object, prototype);"
"}");
- Handle<JSObject> baseObject =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Object>::Cast(
- CcTest::global()->Get(v8_str("base"))));
-
// Verify that only dead prototype transitions are cleared.
- CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
+ CHECK_EQ(initialTransitions + 10,
+ baseObject->map()->NumberOfProtoTransitions());
CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
const int transitions = 10 - 3;
- CHECK_EQ(transitions, baseObject->map()->NumberOfProtoTransitions());
+ CHECK_EQ(initialTransitions + transitions,
+ baseObject->map()->NumberOfProtoTransitions());
// Verify that prototype transitions array was compacted.
FixedArray* trans = baseObject->map()->GetPrototypeTransitions();
- for (int i = 0; i < transitions; i++) {
+ for (int i = initialTransitions; i < initialTransitions + transitions; i++) {
int j = Map::kProtoTransitionHeaderSize +
i * Map::kProtoTransitionElementsPerEntry;
CHECK(trans->get(j + Map::kProtoTransitionMapOffset)->IsMap());
Object* proto = trans->get(j + Map::kProtoTransitionPrototypeOffset);
- CHECK(proto->IsTheHole() || proto->IsJSObject());
+ CHECK(proto->IsJSObject());
}
// Make sure next prototype is placed on an old-space evacuation candidate.
Handle<JSObject> prototype;
PagedSpace* space = CcTest::heap()->old_pointer_space();
{
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
SimulateFullSpace(space);
prototype = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
}
@@ -2167,7 +2183,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
v8::HandleScope scope(CcTest::isolate());
SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
" this.x = x;"
@@ -2210,10 +2226,10 @@ TEST(OptimizedPretenuringAllocationFolding) {
"var number_elements = 20000;"
"var elements = new Array();"
"function f() {"
- " for (var i = 0; i < 20000-1; i++) {"
+ " for (var i = 0; i < number_elements; i++) {"
" elements[i] = new DataObject();"
" }"
- " return new DataObject()"
+ " return elements[number_elements-1]"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2512,6 +2528,44 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
}
+// Make sure pretenuring feedback is gathered for constructed objects as well
+// as for literals.
+TEST(OptimizedPretenuringConstructorCalls) {
+ if (!FLAG_allocation_site_pretenuring || !i::FLAG_pretenuring_call_new) {
+ // FLAG_pretenuring_call_new needs to be synced with the snapshot.
+ return;
+ }
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
+ CcTest::InitializeVM();
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
+ "function foo() {"
+ " this.a = 3;"
+ " this.b = {};"
+ "}"
+ "function f() {"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = new foo();"
+ " }"
+ " return elements[number_elements - 1];"
+ "};"
+ "f(); f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+}
+
+
// Test regular array literals allocation.
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
@@ -2539,6 +2593,7 @@ TEST(OptimizedAllocationArrayLiterals) {
}
+// Test global pretenuring call new.
TEST(OptimizedPretenuringCallNew) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_allocation_site_pretenuring = false;
@@ -2549,7 +2604,7 @@ TEST(OptimizedPretenuringCallNew) {
v8::HandleScope scope(CcTest::isolate());
CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
v8::Local<v8::Value> res = CompileRun(
"function g() { this.a = 0; }"
"function f() {"
@@ -2581,7 +2636,7 @@ TEST(Regress1465) {
static const int transitions_count = 256;
{
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
@@ -2711,7 +2766,7 @@ TEST(ReleaseOverReservedPages) {
PagedSpace* old_pointer_space = heap->old_pointer_space();
CHECK_EQ(1, old_pointer_space->CountTotalPages());
for (int i = 0; i < number_of_test_pages; i++) {
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
SimulateFullSpace(old_pointer_space);
factory->NewFixedArray(1, TENURED);
}
@@ -2760,7 +2815,7 @@ TEST(Regress2237) {
// Generate a sliced string that is based on the above parent and
// lives in old-space.
SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
Handle<String> t = factory->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
CHECK(!CcTest::heap()->InNewSpace(*t));
@@ -2826,7 +2881,7 @@ TEST(Regress2211) {
}
-TEST(IncrementalMarkingClearsTypeFeedbackCells) {
+TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2849,23 +2904,27 @@ TEST(IncrementalMarkingClearsTypeFeedbackCells) {
CcTest::global()->Set(v8_str("fun1"), fun1);
CcTest::global()->Set(v8_str("fun2"), fun2);
CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
+
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
CcTest::global()->Get(v8_str("f"))));
- Handle<TypeFeedbackCells> cells(TypeFeedbackInfo::cast(
- f->shared()->code()->type_feedback_info())->type_feedback_cells());
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsJSFunction());
- CHECK(cells->GetCell(1)->value()->IsJSFunction());
+ Handle<FixedArray> feedback_vector(TypeFeedbackInfo::cast(
+ f->shared()->code()->type_feedback_info())->feedback_vector());
+
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK(feedback_vector->get(0)->IsJSFunction());
+ CHECK(feedback_vector->get(1)->IsJSFunction());
SimulateIncrementalMarking();
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsTheHole());
- CHECK(cells->GetCell(1)->value()->IsTheHole());
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK_EQ(feedback_vector->get(0),
+ *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
+ CHECK_EQ(feedback_vector->get(1),
+ *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
}
@@ -3034,6 +3093,11 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
TEST(ReleaseStackTraceData) {
+ if (i::FLAG_always_opt) {
+ // TODO(ulan): Remove this once the memory leak via code_next_link is fixed.
+ // See: https://codereview.chromium.org/181833004/
+ return;
+ }
FLAG_use_ic = false; // ICs retain objects.
FLAG_concurrent_recompilation = false;
CcTest::InitializeVM();
@@ -3344,7 +3408,7 @@ TEST(Regress169928) {
// This should crash with a protection violation if we are running a build
// with the bug.
- AlwaysAllocateScope aa_scope;
+ AlwaysAllocateScope aa_scope(isolate);
v8::Script::Compile(mote_code_string)->Run();
}
@@ -3686,3 +3750,166 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
ASSERT(code->marked_for_deoptimization());
}
+
+
+
+static Handle<JSFunction> OptimizeDummyFunction(const char* name) {
+ EmbeddedVector<char, 256> source;
+ OS::SNPrintF(source,
+ "function %s() { return 0; }"
+ "%s(); %s();"
+ "%%OptimizeFunctionOnNextCall(%s);"
+ "%s();", name, name, name, name, name);
+ CompileRun(source.start());
+ Handle<JSFunction> fun =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str(name))));
+ return fun;
+}
+
+
+static int GetCodeChainLength(Code* code) {
+ int result = 0;
+ while (code->next_code_link()->IsCode()) {
+ result++;
+ code = Code::cast(code->next_code_link());
+ }
+ return result;
+}
+
+
+TEST(NextCodeLinkIsWeak) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ heap->CollectAllAvailableGarbage();
+ int code_chain_length_before, code_chain_length_after;
+ {
+ HandleScope scope(heap->isolate());
+ Handle<JSFunction> mortal = OptimizeDummyFunction("mortal");
+ Handle<JSFunction> immortal = OptimizeDummyFunction("immortal");
+ CHECK_EQ(immortal->code()->next_code_link(), mortal->code());
+ code_chain_length_before = GetCodeChainLength(immortal->code());
+ // Keep the immortal code and let the mortal code die.
+ code = scope.CloseAndEscape(Handle<Code>(immortal->code()));
+ CompileRun("mortal = null; immortal = null;");
+ }
+ heap->CollectAllAvailableGarbage();
+ // Now mortal code should be dead.
+ code_chain_length_after = GetCodeChainLength(*code);
+ CHECK_EQ(code_chain_length_before - 1, code_chain_length_after);
+}
+
+
+static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
+ i::byte buffer[i::Assembler::kMinimalBufferSize];
+ MacroAssembler masm(isolate, buffer, sizeof(buffer));
+ CodeDesc desc;
+ masm.Prologue(BUILD_FUNCTION_FRAME);
+ masm.GetCode(&desc);
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::OPTIMIZED_FUNCTION), undefined);
+ CHECK(code->IsCode());
+ return code;
+}
+
+
+TEST(NextCodeLinkIsWeak2) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ heap->CollectAllAvailableGarbage();
+ Handle<Context> context(Context::cast(heap->native_contexts_list()), isolate);
+ Handle<Code> new_head;
+ Handle<Object> old_head(context->get(Context::OPTIMIZED_CODE_LIST), isolate);
+ {
+ HandleScope scope(heap->isolate());
+ Handle<Code> immortal = DummyOptimizedCode(isolate);
+ Handle<Code> mortal = DummyOptimizedCode(isolate);
+ mortal->set_next_code_link(*old_head);
+ immortal->set_next_code_link(*mortal);
+ context->set(Context::OPTIMIZED_CODE_LIST, *immortal);
+ new_head = scope.CloseAndEscape(immortal);
+ }
+ heap->CollectAllAvailableGarbage();
+ // Now mortal code should be dead.
+ CHECK_EQ(*old_head, new_head->next_code_link());
+}
+
+
+#ifdef DEBUG
+TEST(AddInstructionChangesNewSpacePromotion) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_expose_gc = true;
+ i::FLAG_stress_compaction = true;
+ i::FLAG_gc_interval = 1000;
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ CompileRun(
+ "function add(a, b) {"
+ " return a + b;"
+ "}"
+ "add(1, 2);"
+ "add(\"a\", \"b\");"
+ "var oldSpaceObject;"
+ "gc();"
+ "function crash(x) {"
+ " var object = {a: null, b: null};"
+ " var result = add(1.5, x | 0);"
+ " object.a = result;"
+ " oldSpaceObject = object;"
+ " return object;"
+ "}"
+ "crash(1);"
+ "crash(1);"
+ "%OptimizeFunctionOnNextCall(crash);"
+ "crash(1);");
+
+ v8::Handle<v8::Object> global = CcTest::global();
+ v8::Handle<v8::Function> g =
+ v8::Handle<v8::Function>::Cast(global->Get(v8_str("crash")));
+ v8::Handle<v8::Value> args1[] = { v8_num(1) };
+ heap->DisableInlineAllocation();
+ heap->set_allocation_timeout(1);
+ g->Call(global, 1, args1);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+}
+
+
+void OnFatalErrorExpectOOM(const char* location, const char* message) {
+ // Exit with 0 if the location matches our expectation.
+ exit(strcmp(location, "CALL_AND_RETRY_LAST"));
+}
+
+
+TEST(CEntryStubOOM) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::V8::SetFatalErrorHandler(OnFatalErrorExpectOOM);
+
+ v8::Handle<v8::Value> result = CompileRun(
+ "%SetFlags('--gc-interval=1');"
+ "var a = [];"
+ "a.__proto__ = [];"
+ "a.unshift(1)");
+
+ CHECK(result->IsNumber());
+}
+
+#endif // DEBUG
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
new file mode 100644
index 000000000..bd7a2b285
--- /dev/null
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -0,0 +1,266 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectBoolean(bool expected, Local<Value> result) {
+ CHECK(result->IsBoolean());
+ CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+static void ExpectNumber(double expected, Local<Value> result) {
+ CHECK(result->IsNumber());
+ CHECK_EQ(expected, result->NumberValue());
+}
+
+
+static void ExpectUndefined(Local<Value> result) {
+ CHECK(result->IsUndefined());
+}
+
+
+// Tests are sorted by order of implementation.
+
+TEST(simple_value) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("0x271828;");
+ ExpectInt32(0x271828, result);
+}
+
+
+TEST(global_variable) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("var my_global_var = 0x123; my_global_var;");
+ ExpectInt32(0x123, result);
+}
+
+
+TEST(simple_function_call) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() { return 0x314; }"
+ "foo();");
+ ExpectInt32(0x314, result);
+}
+
+
+TEST(binary_op) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() {"
+ " var a = 0x1200;"
+ " var b = 0x0035;"
+ " return 2 * (a + b - 1);"
+ "}"
+ "foo();");
+ ExpectInt32(0x2468, result);
+}
+
+static void if_comparison_testcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "if ( lhs %s rhs ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
+static void if_comparison_effectcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "var test = lhs %s rhs;"
+ "if ( test ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
+static void if_comparison_helper(
+ char const * op,
+ int expect_when_lt,
+ int expect_when_eq,
+ int expect_when_gt) {
+ // TODO(all): Non-SMI tests.
+
+ if_comparison_testcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_testcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_testcontext_helper(op, "9", "7", expect_when_gt);
+
+ if_comparison_effectcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_effectcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_effectcontext_helper(op, "9", "7", expect_when_gt);
+}
+
+
+TEST(if_comparison) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ if_comparison_helper("<", 1, 0, 0);
+ if_comparison_helper("<=", 1, 1, 0);
+ if_comparison_helper("==", 0, 1, 0);
+ if_comparison_helper("===", 0, 1, 0);
+ if_comparison_helper(">=", 0, 1, 1);
+ if_comparison_helper(">", 0, 0, 1);
+ if_comparison_helper("!=", 1, 0, 1);
+ if_comparison_helper("!==", 1, 0, 1);
+}
+
+
+TEST(unary_plus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ // SMI
+ result = CompileRun("var a = 1234; +a");
+ ExpectInt32(1234, result);
+ // Number
+ result = CompileRun("var a = 1234.5; +a");
+ ExpectNumber(1234.5, result);
+ // String (SMI)
+ result = CompileRun("var a = '1234'; +a");
+ ExpectInt32(1234, result);
+ // String (Number)
+ result = CompileRun("var a = '1234.5'; +a");
+ ExpectNumber(1234.5, result);
+ // Check side effects.
+ result = CompileRun("var a = 1234; +(a = 4321); a");
+ ExpectInt32(4321, result);
+}
+
+
+TEST(unary_minus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = 1234.5; -a");
+ ExpectNumber(-1234.5, result);
+ result = CompileRun("var a = 1234; -(a = 4321); a");
+ ExpectInt32(4321, result);
+ result = CompileRun("var a = '1234'; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = '1234.5'; -a");
+ ExpectNumber(-1234.5, result);
+}
+
+
+TEST(unary_void) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; void (a);");
+ ExpectUndefined(result);
+ result = CompileRun("var a = 0; void (a = 42); a");
+ ExpectInt32(42, result);
+ result = CompileRun("var a = 0; void (a = 42);");
+ ExpectUndefined(result);
+}
+
+
+TEST(unary_not) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !(a = 1234); a");
+ ExpectInt32(1234, result);
+ result = CompileRun("var a = '1234'; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = ''; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 1234; !!a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !!a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(1, result);
+ result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(0, result);
+}
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
new file mode 100644
index 000000000..df3f4a829
--- /dev/null
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -0,0 +1,143 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Adapted from test/mjsunit/compiler/variables.js
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+// Global variables.
+TEST(global_variables) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"var x = 0;"
+"function f0() { return x; }"
+"f0();");
+ ExpectInt32(0, result);
+}
+
+
+// Parameters.
+TEST(parameters) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f1(x) { return x; }"
+"f1(1);");
+ ExpectInt32(1, result);
+}
+
+
+// Stack-allocated locals.
+TEST(stack_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f2() { var x = 2; return x; }"
+"f2();");
+ ExpectInt32(2, result);
+}
+
+
+// Context-allocated locals. Local function forces x into f3's context.
+TEST(context_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f3(x) {"
+" function g() { return x; }"
+" return x;"
+"}"
+"f3(3);");
+ ExpectInt32(3, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(read_from_outer_context) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f4(x) {"
+" function g() { return x; }"
+" return g();"
+"}"
+"f4(4);");
+ ExpectInt32(4, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(lookup_slots) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f5(x) {"
+" with ({}) return x;"
+"}"
+"f5(5);");
+ ExpectInt32(5, result);
+}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 65310369c..42af0a555 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -310,7 +310,7 @@ TEST(Issue23768) {
// Script needs to have a name in order to trigger InitLineEnds execution.
v8::Handle<v8::String> origin =
v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test");
- v8::Handle<v8::Script> evil_script = v8::Script::Compile(source, origin);
+ v8::Handle<v8::Script> evil_script = CompileWithOrigin(source, origin);
CHECK(!evil_script.IsEmpty());
CHECK(!evil_script->Run().IsEmpty());
i::Handle<i::ExternalTwoByteString> i_source(
@@ -468,7 +468,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
CcTest::isolate(), reinterpret_cast<const char*>(source.start()),
v8::String::kNormalString, source.length());
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(source_str, v8_str(""));
+ v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
if (script.IsEmpty()) {
v8::String::Utf8Value exception(try_catch.Exception());
printf("compile: %s\n", *exception);
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
index 38c738f1d..3ad52712c 100644
--- a/deps/v8/test/cctest/test-macro-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -122,6 +122,34 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmp(ebx, edx);
__ j(not_equal, &exit);
+ // Test 5.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm, SSE2);
+ __ mov(eax, Immediate(5)); // Test XMM move immediate.
+ __ Move(xmm0, 0.0);
+ __ Move(xmm1, 0.0);
+ __ ucomisd(xmm0, xmm1);
+ __ j(not_equal, &exit);
+ __ Move(xmm2, 991.01);
+ __ ucomisd(xmm0, xmm2);
+ __ j(equal, &exit);
+ __ Move(xmm0, 991.01);
+ __ ucomisd(xmm0, xmm2);
+ __ j(not_equal, &exit);
+ }
+
+ // Test 6.
+ __ mov(eax, Immediate(6));
+ __ Move(edx, Immediate(0)); // Test Move()
+ __ cmp(edx, Immediate(0));
+ __ j(not_equal, &exit);
+ __ Move(ecx, Immediate(-1));
+ __ cmp(ecx, Immediate(-1));
+ __ j(not_equal, &exit);
+ __ Move(ebx, Immediate(0x77));
+ __ cmp(ebx, Immediate(0x77));
+ __ j(not_equal, &exit);
+
__ xor_(eax, eax); // Success.
__ bind(&exit);
__ add(esp, Immediate(1 * kPointerSize));
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index b20094967..3154aac59 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -132,5 +132,47 @@ TEST(CopyBytes) {
}
+static void TestNaN(const char *code) {
+ // NaN value is different on MIPS and x86 architectures, and TEST(NaNx)
+ // tests checks the case where a x86 NaN value is serialized into the
+ // snapshot on the simulator during cross compilation.
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Script> script = v8::Script::Compile(v8_str(code));
+ v8::Local<v8::Object> result = v8::Local<v8::Object>::Cast(script->Run());
+ // Have to populate the handle manually, as it's not Cast-able.
+ i::Handle<i::JSObject> o =
+ v8::Utils::OpenHandle<v8::Object, i::JSObject>(result);
+ i::Handle<i::JSArray> array1(reinterpret_cast<i::JSArray*>(*o));
+ i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
+ double value = a->get_scalar(0);
+ CHECK(std::isnan(value) &&
+ i::BitCast<uint64_t>(value) ==
+ i::BitCast<uint64_t>(
+ i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+}
+
+
+TEST(NaN0) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = new Array(Number.NaN, Number.POSITIVE_INFINITY);"
+ "}"
+ "result;");
+}
+
+
+TEST(NaN1) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = [NaN];"
+ "}"
+ "result;");
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 3daed5b45..f29daccea 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -99,8 +99,8 @@ typedef int (*F0)();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
- __ push(i::kSmiConstantRegister);
- __ push(i::kRootRegister);
+ __ pushq(i::kSmiConstantRegister);
+ __ pushq(i::kRootRegister);
__ InitializeSmiConstantRegister();
__ InitializeRootRegister();
}
@@ -112,8 +112,8 @@ static void ExitCode(MacroAssembler* masm) {
__ cmpq(rdx, i::kSmiConstantRegister);
__ movq(rdx, Immediate(-1));
__ cmovq(not_equal, rax, rdx);
- __ pop(i::kRootRegister);
- __ pop(i::kSmiConstantRegister);
+ __ popq(i::kRootRegister);
+ __ popq(i::kSmiConstantRegister);
}
@@ -181,7 +181,7 @@ TEST(SmiMove) {
TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -277,7 +277,7 @@ TEST(SmiCompare) {
TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -380,7 +380,7 @@ TEST(Integer32ToSmi) {
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -450,7 +450,7 @@ TEST(Integer64PlusConstantToSmi) {
TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -490,7 +490,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -501,7 +501,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -512,7 +512,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -523,7 +523,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -536,7 +536,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "zero" non-smi.
__ j(cond, &exit);
@@ -553,7 +553,7 @@ TEST(SmiCheck) {
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Negative" non-smi.
__ j(cond, &exit);
@@ -564,7 +564,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi.
__ j(cond, &exit);
@@ -605,17 +605,17 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
@@ -649,7 +649,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
// Success
- __ xor_(rax, rax);
+ __ xorq(rax, rax);
__ bind(&exit);
ExitCode(masm);
@@ -736,7 +736,7 @@ TEST(SmiNeg) {
TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -961,7 +961,7 @@ TEST(SmiAdd) {
SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1182,7 +1182,7 @@ TEST(SmiSub) {
SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
SmiSubOverflowTest(masm, &exit, 0x100, 0);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1269,7 +1269,7 @@ TEST(SmiMul) {
TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1360,8 +1360,8 @@ TEST(SmiDiv) {
EntryCode(masm);
Label exit;
- __ push(r14);
- __ push(r15);
+ __ pushq(r14);
+ __ pushq(r15);
TestSmiDiv(masm, &exit, 0x10, 1, 1);
TestSmiDiv(masm, &exit, 0x20, 1, 0);
TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1383,11 +1383,11 @@ TEST(SmiDiv) {
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
- __ pop(r15);
- __ pop(r14);
+ __ popq(r15);
+ __ popq(r14);
ExitCode(masm);
__ ret(0);
@@ -1470,8 +1470,8 @@ TEST(SmiMod) {
EntryCode(masm);
Label exit;
- __ push(r14);
- __ push(r15);
+ __ pushq(r14);
+ __ pushq(r15);
TestSmiMod(masm, &exit, 0x10, 1, 1);
TestSmiMod(masm, &exit, 0x20, 1, 0);
TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1493,11 +1493,11 @@ TEST(SmiMod) {
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
- __ pop(r15);
- __ pop(r14);
+ __ popq(r15);
+ __ popq(r14);
ExitCode(masm);
__ ret(0);
@@ -1573,7 +1573,7 @@ TEST(SmiIndex) {
TestSmiIndex(masm, &exit, 0x40, 1000);
TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1590,7 +1590,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ movl(rax, Immediate(id));
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1600,7 +1600,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1611,8 +1611,8 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
Label fail_ok;
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
@@ -1646,7 +1646,7 @@ TEST(SmiSelectNonSmi) {
TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1727,7 +1727,7 @@ TEST(SmiAnd) {
TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1810,7 +1810,7 @@ TEST(SmiOr) {
TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1893,7 +1893,7 @@ TEST(SmiXor) {
TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1955,7 +1955,7 @@ TEST(SmiNot) {
TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNot(masm, &exit, 0x80, 0x05555555);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2050,7 +2050,7 @@ TEST(SmiShiftLeft) {
TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
TestSmiShiftLeft(masm, &exit, 0x190, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2156,7 +2156,7 @@ TEST(SmiShiftLogicalRight) {
TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2225,7 +2225,7 @@ TEST(SmiShiftArithmeticRight) {
TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2291,7 +2291,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2324,28 +2324,28 @@ TEST(OperandOffset) {
Label exit;
EntryCode(masm);
- __ push(r13);
- __ push(r14);
- __ push(rbx);
- __ push(rbp);
- __ push(Immediate(0x100)); // <-- rbp
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(rbx);
+ __ pushq(rbp);
+ __ pushq(Immediate(0x100)); // <-- rbp
__ movq(rbp, rsp);
- __ push(Immediate(0x101));
- __ push(Immediate(0x102));
- __ push(Immediate(0x103));
- __ push(Immediate(0x104));
- __ push(Immediate(0x105)); // <-- rbx
- __ push(Immediate(0x106));
- __ push(Immediate(0x107));
- __ push(Immediate(0x108));
- __ push(Immediate(0x109)); // <-- rsp
+ __ pushq(Immediate(0x101));
+ __ pushq(Immediate(0x102));
+ __ pushq(Immediate(0x103));
+ __ pushq(Immediate(0x104));
+ __ pushq(Immediate(0x105)); // <-- rbx
+ __ pushq(Immediate(0x106));
+ __ pushq(Immediate(0x107));
+ __ pushq(Immediate(0x108));
+ __ pushq(Immediate(0x109)); // <-- rsp
// rbp = rsp[9]
// r15 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
- __ lea(r14, Operand(rsp, 3 * kPointerSize));
- __ lea(r13, Operand(rbp, -3 * kPointerSize));
- __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+ __ leaq(r14, Operand(rsp, 3 * kPointerSize));
+ __ leaq(r13, Operand(rbp, -3 * kPointerSize));
+ __ leaq(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
__ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
@@ -2643,11 +2643,11 @@ TEST(OperandOffset) {
__ movl(rax, Immediate(0));
__ bind(&exit);
- __ lea(rsp, Operand(rbp, kPointerSize));
- __ pop(rbp);
- __ pop(rbx);
- __ pop(r14);
- __ pop(r13);
+ __ leaq(rsp, Operand(rbp, kPointerSize));
+ __ popq(rbp);
+ __ popq(rbx);
+ __ popq(r14);
+ __ popq(r13);
ExitCode(masm);
__ ret(0);
@@ -2796,7 +2796,7 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
__ addq(rsp, Immediate(1 * kPointerSize));
ExitCode(masm);
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 0c95d94f4..0200129b1 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -162,7 +162,7 @@ TEST(MarkCompactCollector) {
SharedFunctionInfo* function_share = SharedFunctionInfo::cast(
heap->AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
JSFunction* function = JSFunction::cast(
- heap->AllocateFunction(*isolate->function_map(),
+ heap->AllocateFunction(*isolate->sloppy_function_map(),
function_share,
heap->undefined_value())->ToObjectChecked());
Map* initial_map =
@@ -170,7 +170,7 @@ TEST(MarkCompactCollector) {
JSObject::kHeaderSize)->ToObjectChecked());
function->set_initial_map(initial_map);
JSReceiver::SetProperty(
- global, handle(func_name), handle(function), NONE, kNonStrictMode);
+ global, handle(func_name), handle(function), NONE, SLOPPY);
JSObject* obj = JSObject::cast(
heap->AllocateJSObject(function)->ToObjectChecked());
@@ -187,13 +187,12 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(heap->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
String::cast(heap->InternalizeUtf8String("theObject")->ToObjectChecked());
- JSReceiver::SetProperty(
- global, handle(obj_name), handle(obj), NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, handle(obj_name), handle(obj), NONE, SLOPPY);
String* prop_name =
String::cast(heap->InternalizeUtf8String("theSlot")->ToObjectChecked());
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
JSReceiver::SetProperty(
- handle(obj), handle(prop_name), twenty_three, NONE, kNonStrictMode);
+ handle(obj), handle(prop_name), twenty_three, NONE, SLOPPY);
heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 5");
@@ -496,6 +495,7 @@ TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
// Avoid flakiness.
FLAG_crankshaft = false;
+ FLAG_concurrent_osr = false;
FLAG_concurrent_recompilation = false;
// Only Linux has the proc filesystem and only if it is mapped. If it's not
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index f59eef948..1dc38f9af 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -29,11 +29,8 @@
using namespace v8::internal;
-TEST(Regress340063) {
- CcTest::InitializeVM();
- if (!i::FLAG_allocation_site_pretenuring) return;
- v8::HandleScope scope(CcTest::isolate());
+static void SetUpNewSpaceWithPoisonedMementoAtTop() {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
NewSpace* new_space = heap->new_space();
@@ -52,8 +49,75 @@ TEST(Regress340063) {
memento->set_map_no_write_barrier(heap->allocation_memento_map());
memento->set_allocation_site(
reinterpret_cast<AllocationSite*>(kHeapObjectTag), SKIP_WRITE_BARRIER);
+}
+
+
+TEST(Regress340063) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::i_isolate()->heap()->CollectAllGarbage(
+ Heap::kAbortIncrementalMarkingMask);
+}
+
+
+TEST(BadMementoAfterTopForceScavenge) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
+
+  // Force a new-space collection (scavenge) to exercise handling of the
+ CcTest::i_isolate()->heap()->CollectGarbage(i::NEW_SPACE);
+}
+
+
+TEST(PretenuringCallNew) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ if (!i::FLAG_pretenuring_call_new) return;
+
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ // We need to create several instances to get past the slack-tracking
+ // phase, where mementos aren't emitted.
+ int call_count = 10;
+ CHECK_GE(call_count, SharedFunctionInfo::kGenerousAllocationCount);
+ i::ScopedVector<char> test_buf(1024);
+ const char* program =
+ "function f() {"
+ " this.a = 3;"
+ " this.b = {};"
+ " return this;"
+ "};"
+ "var a;"
+ "for(var i = 0; i < %d; i++) {"
+ " a = new f();"
+ "}"
+ "a;";
+ i::OS::SNPrintF(test_buf, program, call_count);
+ v8::Local<v8::Value> res = CompileRun(test_buf.start());
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ // The object of class f should have a memento secreted behind it.
+ Address memento_address = o->address() + o->map()->instance_size();
+ AllocationMemento* memento =
+ reinterpret_cast<AllocationMemento*>(memento_address + kHeapObjectTag);
+ CHECK_EQ(memento->map(), heap->allocation_memento_map());
+
+ // Furthermore, how many mementos did we create? The count should match
+ // call_count - SharedFunctionInfo::kGenerousAllocationCount.
+ AllocationSite* site = memento->GetAllocationSite();
+ CHECK_EQ(call_count - SharedFunctionInfo::kGenerousAllocationCount,
+ site->pretenure_create_count()->value());
}
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
new file mode 100644
index 000000000..0172726af
--- /dev/null
+++ b/deps/v8/test/cctest/test-microtask-delivery.cc
@@ -0,0 +1,135 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+
+using namespace v8;
+namespace i = v8::internal;
+
+namespace {
+class HarmonyIsolate {
+ public:
+ HarmonyIsolate() {
+ isolate_ = Isolate::New();
+ isolate_->Enter();
+ }
+
+ ~HarmonyIsolate() {
+ isolate_->Exit();
+ isolate_->Dispose();
+ }
+
+ Isolate* GetIsolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+};
+}
+
+
+TEST(MicrotaskDeliverySimple) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context(isolate.GetIsolate());
+ CompileRun(
+ "var ordering = [];"
+ "var resolver = {};"
+ "function handler(resolve) { resolver.resolve = resolve; }"
+ "var obj = {};"
+ "var observeOrders = [1, 4];"
+ "function observer() {"
+ "ordering.push(observeOrders.shift());"
+ "resolver.resolve();"
+ "}"
+ "var p = new Promise(handler);"
+ "p.then(function() {"
+ "ordering.push(2);"
+ "}).then(function() {"
+ "ordering.push(3);"
+ "obj.id++;"
+ "return new Promise(handler);"
+ "}).then(function() {"
+ "ordering.push(5);"
+ "}).then(function() {"
+ "ordering.push(6);"
+ "});"
+ "Object.observe(obj, observer);"
+ "obj.id = 1;");
+ CHECK_EQ(6, CompileRun("ordering.length")->Int32Value());
+ CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
+ CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
+ CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
+ CHECK_EQ(4, CompileRun("ordering[3]")->Int32Value());
+ CHECK_EQ(5, CompileRun("ordering[4]")->Int32Value());
+ CHECK_EQ(6, CompileRun("ordering[5]")->Int32Value());
+}
+
+
+TEST(MicrotaskPerIsolateState) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context1(isolate.GetIsolate());
+ V8::SetAutorunMicrotasks(isolate.GetIsolate(), false);
+ CompileRun(
+ "var obj = { calls: 0 };");
+ Handle<Value> obj = CompileRun("obj");
+ {
+ LocalContext context2(isolate.GetIsolate());
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var resolver = {};"
+ "new Promise(function(resolve) {"
+ "resolver.resolve = resolve;"
+ "}).then(function() {"
+ "obj.calls++;"
+ "});"
+ "(function() {"
+ "resolver.resolve();"
+ "})();");
+ }
+ {
+ LocalContext context3(isolate.GetIsolate());
+ context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var foo = { id: 1 };"
+ "Object.observe(foo, function() {"
+ "obj.calls++;"
+ "});"
+ "foo.id++;");
+ }
+ {
+ LocalContext context4(isolate.GetIsolate());
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ V8::RunMicrotasks(isolate.GetIsolate());
+ CHECK_EQ(2, CompileRun("obj.calls")->Int32Value());
+ }
+}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 0a30d4e27..6bde5b37e 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -32,33 +32,10 @@
using namespace v8;
namespace i = v8::internal;
-namespace {
-// Need to create a new isolate when FLAG_harmony_observation is on.
-class HarmonyIsolate {
- public:
- HarmonyIsolate() {
- i::FLAG_harmony_observation = true;
- isolate_ = Isolate::New();
- isolate_->Enter();
- }
-
- ~HarmonyIsolate() {
- isolate_->Exit();
- isolate_->Dispose();
- }
-
- Isolate* GetIsolate() const { return isolate_; }
-
- private:
- Isolate* isolate_;
-};
-}
-
TEST(PerIsolateState) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context1(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context1(CcTest::isolate());
CompileRun(
"var count = 0;"
"var calls = 0;"
@@ -71,29 +48,29 @@ TEST(PerIsolateState) {
"(function() { obj.foo = 'bar'; })");
Handle<Value> notify_fun2;
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
notify_fun2 = CompileRun(
"(function() { obj.foo = 'baz'; })");
}
Handle<Value> notify_fun3;
{
- LocalContext context3(isolate.GetIsolate());
- context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context3(CcTest::isolate());
+ context3->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
notify_fun3 = CompileRun(
"(function() { obj.foo = 'bat'; })");
}
{
- LocalContext context4(isolate.GetIsolate());
+ LocalContext context4(CcTest::isolate());
context4->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun1"),
+ String::NewFromUtf8(CcTest::isolate(), "observer"), observer);
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun1"),
notify_fun1);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun2"),
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun2"),
notify_fun2);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun3"),
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun3"),
notify_fun3);
CompileRun("fun1(); fun2(); fun3(); Object.deliverChangeRecords(observer)");
}
@@ -103,9 +80,8 @@ TEST(PerIsolateState) {
TEST(EndOfMicrotaskDelivery) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var count = 0;"
@@ -117,9 +93,8 @@ TEST(EndOfMicrotaskDelivery) {
TEST(DeliveryOrdering) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj1 = {};"
"var obj2 = {};"
@@ -149,9 +124,8 @@ TEST(DeliveryOrdering) {
TEST(DeliveryOrderingReentrant) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var reentered = false;"
@@ -181,9 +155,8 @@ TEST(DeliveryOrderingReentrant) {
TEST(DeliveryOrderingDeliverChangeRecords) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var ordering = [];"
@@ -206,21 +179,20 @@ TEST(DeliveryOrderingDeliverChangeRecords) {
TEST(ObjectHashTableGrowth) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
// Initializing this context sets up initial hash tables.
- LocalContext context(isolate.GetIsolate());
+ LocalContext context(CcTest::isolate());
Handle<Value> obj = CompileRun("obj = {};");
Handle<Value> observer = CompileRun(
"var ran = false;"
"(function() { ran = true })");
{
// As does initializing this context.
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
+ String::NewFromUtf8(CcTest::isolate(), "observer"), observer);
CompileRun(
"var objArr = [];"
// 100 objects should be enough to make the hash table grow
@@ -238,9 +210,8 @@ TEST(ObjectHashTableGrowth) {
TEST(GlobalObjectObservation) {
- HarmonyIsolate isolate;
- LocalContext context(isolate.GetIsolate());
- HandleScope scope(isolate.GetIsolate());
+ LocalContext context(CcTest::isolate());
+ HandleScope scope(CcTest::isolate());
Handle<Object> global_proxy = context->Global();
CompileRun(
"var records = [];"
@@ -261,7 +232,7 @@ TEST(GlobalObjectObservation) {
// to the old context.
context->DetachGlobal();
{
- LocalContext context2(isolate.GetIsolate());
+ LocalContext context2(CcTest::isolate());
CompileRun(
"var records2 = [];"
"var global = this;"
@@ -279,7 +250,7 @@ TEST(GlobalObjectObservation) {
{
// Delegates to Context::New
LocalContext context3(
- isolate.GetIsolate(), NULL, Handle<ObjectTemplate>(), global_proxy);
+ CcTest::isolate(), NULL, Handle<ObjectTemplate>(), global_proxy);
CompileRun(
"var records3 = [];"
"Object.observe(this, function(r) { [].push.apply(records3, r) });"
@@ -327,12 +298,11 @@ static void ExpectRecords(v8::Isolate* isolate,
}
#define EXPECT_RECORDS(records, expectations) \
- ExpectRecords(isolate.GetIsolate(), records, expectations, \
+ ExpectRecords(CcTest::isolate(), records, expectations, \
ARRAY_SIZE(expectations))
TEST(APITestBasicMutation) {
- HarmonyIsolate isolate;
- v8::Isolate* v8_isolate = isolate.GetIsolate();
+ v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
Handle<Object> obj = Handle<Object>::Cast(CompileRun(
@@ -379,8 +349,7 @@ TEST(APITestBasicMutation) {
TEST(HiddenPrototypeObservation) {
- HarmonyIsolate isolate;
- v8::Isolate* v8_isolate = isolate.GetIsolate();
+ v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
Handle<FunctionTemplate> tmpl = FunctionTemplate::New(v8_isolate);
@@ -431,15 +400,14 @@ static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
TEST(ObservationWeakMap) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"Object.observe(obj, function(){});"
"Object.getNotifier(obj);"
"obj = null;");
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate.GetIsolate());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(CcTest::isolate());
i::Handle<i::JSObject> observation_state =
i_isolate->factory()->observation_state();
i::Handle<i::JSWeakMap> callbackInfoMap =
@@ -528,17 +496,16 @@ static Handle<Object> CreateAccessCheckedObject(
TEST(NamedAccessCheck) {
- HarmonyIsolate isolate;
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(),
+ CcTest::isolate(),
NamedAccessAllowUnlessBlocked,
IndexedAccessAlwaysAllowed,
- String::NewFromUtf8(isolate.GetIsolate(), "foo"));
+ String::NewFromUtf8(CcTest::isolate(), "foo"));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -546,11 +513,11 @@ TEST(NamedAccessCheck) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"),
obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
@@ -564,9 +531,9 @@ TEST(NamedAccessCheck) {
const RecordExpectation expected_records2[] = {
{ instance, "add", "foo", Handle<Value>() },
{ instance, "update", "foo",
- String::NewFromUtf8(isolate.GetIsolate(), "bar") },
+ String::NewFromUtf8(CcTest::isolate(), "bar") },
{ instance, "reconfigure", "foo",
- Number::New(isolate.GetIsolate(), 5) },
+ Number::New(CcTest::isolate(), 5) },
{ instance, "add", "bar", Handle<Value>() },
{ obj_no_check, "add", "baz", Handle<Value>() },
};
@@ -582,15 +549,14 @@ TEST(NamedAccessCheck) {
TEST(IndexedAccessCheck) {
- HarmonyIsolate isolate;
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), NamedAccessAlwaysAllowed,
- IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 7));
+ CcTest::isolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(CcTest::isolate(), 7));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -598,11 +564,11 @@ TEST(IndexedAccessCheck) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"),
obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
@@ -616,8 +582,8 @@ TEST(IndexedAccessCheck) {
const RecordExpectation expected_records2[] = {
{ instance, "add", "7", Handle<Value>() },
{ instance, "update", "7",
- String::NewFromUtf8(isolate.GetIsolate(), "foo") },
- { instance, "reconfigure", "7", Number::New(isolate.GetIsolate(), 5) },
+ String::NewFromUtf8(CcTest::isolate(), "foo") },
+ { instance, "reconfigure", "7", Number::New(CcTest::isolate(), 5) },
{ instance, "add", "8", Handle<Value>() },
{ obj_no_check, "add", "42", Handle<Value>() }
};
@@ -633,13 +599,12 @@ TEST(IndexedAccessCheck) {
TEST(SpliceAccessCheck) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = ACCESS_GET;
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), NamedAccessAlwaysAllowed,
- IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 1));
+ CcTest::isolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(CcTest::isolate(), 1));
CompileRun("var records = null;"
"obj[1] = 'foo';"
"obj.length = 2;"
@@ -649,11 +614,11 @@ TEST(SpliceAccessCheck) {
"Array.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Array.observe(obj, observer2);"
@@ -680,11 +645,10 @@ TEST(SpliceAccessCheck) {
TEST(DisallowAllForAccessKeys) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ CcTest::isolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -692,11 +656,11 @@ TEST(DisallowAllForAccessKeys) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);"
@@ -719,24 +683,23 @@ TEST(DisallowAllForAccessKeys) {
TEST(AccessCheckDisallowApiModifications) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ CcTest::isolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);");
- instance->Set(5, String::NewFromUtf8(isolate.GetIsolate(), "bar"));
- instance->Set(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
- String::NewFromUtf8(isolate.GetIsolate(), "bar"));
+ instance->Set(5, String::NewFromUtf8(CcTest::isolate(), "bar"));
+ instance->Set(String::NewFromUtf8(CcTest::isolate(), "foo"),
+ String::NewFromUtf8(CcTest::isolate(), "bar"));
CompileRun(""); // trigger delivery
const RecordExpectation expected_records2[] = {
{ instance, "add", "5", Handle<Value>() },
@@ -749,18 +712,17 @@ TEST(AccessCheckDisallowApiModifications) {
TEST(HiddenPropertiesLeakage) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun("var obj = {};"
"var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
Handle<Value> obj =
- context->Global()->Get(String::NewFromUtf8(isolate.GetIsolate(), "obj"));
+ context->Global()->Get(String::NewFromUtf8(CcTest::isolate(), "obj"));
Handle<Object>::Cast(obj)
- ->SetHiddenValue(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
- Null(isolate.GetIsolate()));
+ ->SetHiddenValue(String::NewFromUtf8(CcTest::isolate(), "foo"),
+ Null(CcTest::isolate()));
CompileRun(""); // trigger delivery
CHECK(CompileRun("records")->IsNull());
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 22d5056f8..2746388bb 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -35,6 +35,7 @@
#include "compiler.h"
#include "execution.h"
#include "isolate.h"
+#include "objects.h"
#include "parser.h"
#include "preparser.h"
#include "scanner-character-streams.h"
@@ -212,18 +213,25 @@ TEST(Preparsing) {
{
i::FLAG_lazy = true;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source =
- v8::String::NewExternal(isolate, resource);
- v8::Script::Compile(script_source, NULL, preparse);
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewExternal(isolate, resource),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::ScriptCompiler::Compile(isolate,
+ &script_source);
}
{
i::FLAG_lazy = false;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source =
- v8::String::NewExternal(isolate, resource);
- v8::Script::New(script_source, NULL, preparse, v8::Local<v8::String>());
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewExternal(isolate, resource),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
}
delete preparse;
i::FLAG_lazy = lazy_flag;
@@ -252,6 +260,99 @@ TEST(Preparsing) {
}
+TEST(PreparseFunctionDataIsUsed) {
+ // This tests that we actually do use the function data generated by the
+ // preparser.
+
+ // Make preparsing work for short scripts.
+ i::FLAG_min_preparse_length = 0;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ const char* good_code =
+ "function this_is_lazy() { var a; } function foo() { return 25; } foo();";
+
+ // Insert a syntax error inside the lazy function.
+ const char* bad_code =
+ "function this_is_lazy() { if ( } function foo() { return 25; } foo();";
+
+ v8::ScriptCompiler::Source good_source(v8_str(good_code));
+ v8::ScriptCompiler::Compile(isolate, &good_source,
+ v8::ScriptCompiler::kProduceDataToCache);
+
+ const v8::ScriptCompiler::CachedData* cached_data =
+ good_source.GetCachedData();
+ CHECK(cached_data->data != NULL);
+ CHECK_GT(cached_data->length, 0);
+
+ // Now compile the erroneous code with the good preparse data. If the preparse
+ // data is used, the lazy function is skipped and it should compile fine.
+ v8::ScriptCompiler::Source bad_source(
+ v8_str(bad_code), new v8::ScriptCompiler::CachedData(
+ cached_data->data, cached_data->length));
+ v8::Local<v8::Value> result =
+ v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
+ CHECK(result->IsInt32());
+ CHECK_EQ(25, result->Int32Value());
+}
+
+
+TEST(PreparseSymbolDataIsUsed) {
+ // This tests that we actually do use the symbol data generated by the
+ // preparser.
+
+ // Only do one compilation pass in this test (otherwise we will parse the
+ // source code again without preparse data and it will fail).
+ i::FLAG_crankshaft = false;
+
+ // Make preparsing work for short scripts.
+ i::FLAG_min_preparse_length = 0;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ // Note that the ( before function makes the function not lazily compiled.
+ const char* good_code =
+ "(function weird() { var foo = 26; return foo; })()";
+
+ // Insert an undefined identifier. If the preparser data is used, the symbol
+ // stream is used instead, and this identifier resolves to "foo".
+ const char* bad_code =
+ "(function weird() { var foo = 26; return wut; })()";
+
+ v8::ScriptCompiler::Source good_source(v8_str(good_code));
+ v8::ScriptCompiler::Compile(isolate, &good_source,
+ v8::ScriptCompiler::kProduceDataToCache);
+
+ const v8::ScriptCompiler::CachedData* cached_data =
+ good_source.GetCachedData();
+ CHECK(cached_data->data != NULL);
+ CHECK_GT(cached_data->length, 0);
+
+ // Now compile the erroneous code with the good preparse data. If the preparse
+ // data is used, we will see a second occurrence of "foo" instead of the
+ // unknown "wut".
+ v8::ScriptCompiler::Source bad_source(
+ v8_str(bad_code), new v8::ScriptCompiler::CachedData(
+ cached_data->data, cached_data->length));
+ v8::Local<v8::Value> result =
+ v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
+ CHECK(result->IsInt32());
+ CHECK_EQ(26, result->Int32Value());
+}
+
+
TEST(StandAlonePreParser) {
v8::V8::Initialize();
@@ -324,6 +425,99 @@ TEST(StandAlonePreParserNoNatives) {
}
+TEST(PreparsingObjectLiterals) {
+ // Regression test for a bug where the symbol stream produced by PreParser
+ // didn't match what Parser wanted to consume.
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ {
+ const char* source = "var myo = {if: \"foo\"}; myo.if;";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+
+ {
+ const char* source = "var myo = {\"bar\": \"foo\"}; myo[\"bar\"];";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+
+ {
+ const char* source = "var myo = {1: \"foo\"}; myo[1];";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+}
+
+namespace v8 {
+namespace internal {
+
+struct CompleteParserRecorderFriend {
+ static void FakeWritingSymbolIdInPreParseData(CompleteParserRecorder* log,
+ int number) {
+ log->WriteNumber(number);
+ if (log->symbol_id_ < number + 1) {
+ log->symbol_id_ = number + 1;
+ }
+ }
+ static int symbol_position(CompleteParserRecorder* log) {
+ return log->symbol_store_.size();
+ }
+ static int symbol_ids(CompleteParserRecorder* log) {
+ return log->symbol_id_;
+ }
+ static int function_position(CompleteParserRecorder* log) {
+ return log->function_store_.size();
+ }
+};
+
+}
+}
+
+
+TEST(StoringNumbersInPreParseData) {
+ // Symbol IDs are split into chunks of 7 bits for storing. This is a
+ // regression test for a bug where a symbol id was incorrectly stored if some
+ // of the chunks in the middle were all zeros.
+ typedef i::CompleteParserRecorderFriend F;
+ i::CompleteParserRecorder log;
+ for (int i = 0; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, 1 << i);
+ }
+ for (int i = 1; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, (1 << i) + 1);
+ }
+ for (int i = 6; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, (3 << i) + (5 << (i - 6)));
+ }
+ i::Vector<unsigned> store = log.ExtractData();
+ i::ScriptDataImpl script_data(store);
+ script_data.Initialize();
+ // Check that we get the same symbols back.
+ for (int i = 0; i < 18; ++i) {
+ CHECK_EQ(1 << i, script_data.GetSymbolIdentifier());
+ }
+ for (int i = 1; i < 18; ++i) {
+ CHECK_EQ((1 << i) + 1, script_data.GetSymbolIdentifier());
+ }
+ for (int i = 6; i < 18; ++i) {
+ CHECK_EQ((3 << i) + (5 << (i - 6)), script_data.GetSymbolIdentifier());
+ }
+}
+
+
TEST(RegressChromium62639) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
@@ -713,6 +907,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(re_source),
static_cast<unsigned>(strlen(re_source)));
+ i::HandleScope scope(CcTest::i_isolate());
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -720,8 +915,12 @@ void TestScanRegExp(const char* re_source, const char* expected) {
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
scanner.Next(); // Current token is now the regexp literal.
- CHECK(scanner.is_literal_ascii());
- i::Vector<const char> actual = scanner.literal_ascii_string();
+ i::Handle<i::String> val =
+ scanner.AllocateInternalizedString(CcTest::i_isolate());
+ i::DisallowHeapAllocation no_alloc;
+ i::String::FlatContent content = val->GetFlatContent();
+ CHECK(content.IsAscii());
+ i::Vector<const uint8_t> actual = content.ToOneByteVector();
for (int i = 0; i < actual.length(); i++) {
CHECK_NE('\0', expected[i]);
CHECK_EQ(expected[i], actual[i]);
@@ -828,6 +1027,8 @@ static int Utf8LengthHelper(const char* s) {
TEST(ScopePositions) {
+ v8::internal::FLAG_harmony_scoping = true;
+
// Test the parser for correctly setting the start and end positions
// of a scope. We check the scope positions of exactly one scope
// nested in the global scope of a program. 'inner source' is the
@@ -839,167 +1040,167 @@ TEST(ScopePositions) {
const char* inner_source;
const char* outer_suffix;
i::ScopeType scope_type;
- i::LanguageMode language_mode;
+ i::StrictMode strict_mode;
};
const SourceData source_data[] = {
- { " with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
- { " with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ { " with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::SLOPPY },
+ { " with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({}) ", "{\n"
" block;\n"
" }", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
- { " with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
+ { " with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({}) ", "statement", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({})\n"
" ", "statement;", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", " more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", "; more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) {\n"
" block;\n"
" }", "\n"
- " more;", i::CATCH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", " finally { block; } more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " start;\n"
- " ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
- " ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
" ", "{\n"
" let block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
" function fun", "(a,b) { infunction; }", " more;",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " start;\n"
" function fun", "(a,b) {\n"
" infunction;\n"
" }", "\n"
- " more;", i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ " more;", i::FUNCTION_SCOPE, i::SLOPPY },
{ " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) {\n"
" block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x)\n"
" statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) { block; }", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) { block; }", "; more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) {\n"
" block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) statement;", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {})\n"
" statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
// Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
// the preparser off in terms of byte offsets.
// 6 byte encoding.
{ " 'foo\355\240\201\355\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 4 byte encoding.
{ " 'foo\360\220\220\212';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 3 byte encoding of \u0fff.
{ " 'foo\340\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 6 byte encoding with missing last byte.
{ " 'foo\355\240\201\355\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u0fff with missing last byte.
{ " 'foo\340\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u0fff with missing 2 last bytes.
{ " 'foo\340';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
{ " 'foo\340\203\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u007f should be a 2 byte encoding.
{ " 'foo\340\201\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate.
{ " 'foo\355\240\201';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate where following code point is a 3 byte sequence.
{ " 'foo\355\240\201\340\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate where following code point is a 4 byte encoding
// of a trail surrogate.
{ " 'foo\355\240\201\360\215\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired trail surrogate.
{ " 'foo\355\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 2 byte encoding of \u00ff.
{ " 'foo\303\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 2 byte encoding of \u00ff with missing last byte.
{ " 'foo\303';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 2 byte encoding of \u007f should be a 1 byte encoding.
{ " 'foo\301\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 5 byte encoding.
{ " 'foo\370\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 6 byte encoding.
{ " 'foo\374\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 0xfe byte
{ " 'foo\376\277\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 0xff byte
{ " 'foo\377\277\277\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " 'foo';\n"
" (function fun", "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " 'foo';\n"
" (function fun", "(a,b) { 'bar\360\220\220\214'; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
- { NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
+ i::FUNCTION_SCOPE, i::SLOPPY },
+ { NULL, NULL, NULL, i::EVAL_SCOPE, i::SLOPPY }
};
i::Isolate* isolate = CcTest::i_isolate();
@@ -1038,7 +1239,7 @@ TEST(ScopePositions) {
parser.set_allow_lazy(true);
parser.set_allow_harmony_scoping(true);
info.MarkAsGlobal();
- info.SetLanguageMode(source_data[i].language_mode);
+ info.SetStrictMode(source_data[i].strict_mode);
parser.Parse();
CHECK(info.function() != NULL);
@@ -1071,7 +1272,7 @@ i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
i::JSArray::SetElement(
args_array, i, v8::Utils::OpenHandle(*v8::String::NewFromUtf8(
CcTest::isolate(), args[i])),
- NONE, i::kNonStrictMode);
+ NONE, i::SLOPPY);
}
i::Handle<i::JSObject> builtins(isolate->js_builtins_object());
i::Handle<i::Object> format_fun =
@@ -1108,8 +1309,9 @@ enum ParserSyncTestResult {
kError
};
-
-void SetParserFlags(i::ParserBase* parser, i::EnumSet<ParserFlag> flags) {
+template <typename Traits>
+void SetParserFlags(i::ParserBase<Traits>* parser,
+ i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
@@ -1379,7 +1581,9 @@ TEST(PreparserStrictOctal) {
void RunParserSyncTest(const char* context_data[][2],
const char* statement_data[],
- ParserSyncTestResult result) {
+ ParserSyncTestResult result,
+ const ParserFlag* flags = NULL,
+ int flags_len = 0) {
v8::HandleScope handles(CcTest::isolate());
v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
@@ -1388,10 +1592,14 @@ void RunParserSyncTest(const char* context_data[][2],
CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
- static const ParserFlag flags[] = {
+ static const ParserFlag default_flags[] = {
kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
- kAllowForOf
+ kAllowForOf, kAllowNativesSyntax
};
+ if (!flags) {
+ flags = default_flags;
+ flags_len = ARRAY_SIZE(default_flags);
+ }
for (int i = 0; context_data[i][0] != NULL; ++i) {
for (int j = 0; statement_data[j] != NULL; ++j) {
int kPrefixLen = i::StrLength(context_data[i][0]);
@@ -1409,7 +1617,7 @@ void RunParserSyncTest(const char* context_data[][2],
CHECK(length == kProgramSize);
TestParserSync(program.start(),
flags,
- ARRAY_SIZE(flags),
+ flags_len,
result);
}
}
@@ -1455,7 +1663,7 @@ TEST(ErrorsEvalAndArguments) {
}
-TEST(NoErrorsEvalAndArgumentsClassic) {
+TEST(NoErrorsEvalAndArgumentsSloppy) {
// Tests that both preparsing and parsing accept "eval" and "arguments" as
// identifiers when needed.
const char* context_data[][2] = {
@@ -1600,8 +1808,8 @@ TEST(ErrorsReservedWords) {
}
-TEST(NoErrorsYieldClassic) {
- // In classic mode, it's okay to use "yield" as identifier, *except* inside a
+TEST(NoErrorsYieldSloppy) {
+ // In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
// generator (see next test).
const char* context_data[][2] = {
{ "", "" },
@@ -1627,7 +1835,7 @@ TEST(NoErrorsYieldClassic) {
}
-TEST(ErrorsYieldClassicGenerator) {
+TEST(ErrorsYieldSloppyGenerator) {
const char* context_data[][2] = {
{ "function * is_gen() {", "}" },
{ NULL, NULL }
@@ -1743,7 +1951,7 @@ TEST(NoErrorsNameOfStrictFunction) {
-TEST(ErrorsIllegalWordsAsLabelsClassic) {
+TEST(ErrorsIllegalWordsAsLabelsSloppy) {
// Using future reserved words as labels is always an error.
const char* context_data[][2] = {
{ "", ""},
@@ -1880,7 +2088,6 @@ TEST(DontRegressPreParserDataSizes) {
// These tests make sure that PreParser doesn't start producing less data.
v8::V8::Initialize();
-
int marker;
CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
@@ -1890,9 +2097,18 @@ TEST(DontRegressPreParserDataSizes) {
int symbols;
int functions;
} test_cases[] = {
- // Labels, variables and functions are recorded as symbols.
+ // Labels and variables are recorded as symbols.
{"{label: 42}", 1, 0}, {"{label: 42; label2: 43}", 2, 0},
{"var x = 42;", 1, 0}, {"var x = 42, y = 43;", 2, 0},
+ {"var x = {y: 1};", 2, 0},
+ {"var x = {}; x.y = 1", 2, 0},
+ // "get" is recorded as a symbol too.
+ {"var x = {get foo(){} };", 3, 1},
+ // When keywords are used as identifiers, they're logged as symbols, too:
+ {"var x = {if: 1};", 2, 0},
+ {"var x = {}; x.if = 1", 2, 0},
+ {"var x = {get if(){} };", 3, 1},
+ // Functions
{"function foo() {}", 1, 1}, {"function foo() {} function bar() {}", 2, 2},
// Labels, variables and functions insize lazy functions are not recorded.
{"function lazy() { var a, b, c; }", 1, 1},
@@ -1904,6 +2120,7 @@ TEST(DontRegressPreParserDataSizes) {
// Each function adds 5 elements to the preparse function data.
const int kDataPerFunction = 5;
+ typedef i::CompleteParserRecorderFriend F;
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; test_cases[i].program; i++) {
const char* program = test_cases[i].program;
@@ -1919,21 +2136,22 @@ TEST(DontRegressPreParserDataSizes) {
preparser.set_allow_natives_syntax(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- if (log.symbol_ids() != test_cases[i].symbols) {
+ if (F::symbol_ids(&log) != test_cases[i].symbols) {
i::OS::Print(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d symbols, however, received %d symbols.\n",
- program, test_cases[i].symbols, log.symbol_ids());
+ program, test_cases[i].symbols, F::symbol_ids(&log));
CHECK(false);
}
- if (log.function_position() != test_cases[i].functions * kDataPerFunction) {
+ if (F::function_position(&log) !=
+ test_cases[i].functions * kDataPerFunction) {
i::OS::Print(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d functions, however, received %d functions.\n",
program, test_cases[i].functions,
- log.function_position() / kDataPerFunction);
+ F::function_position(&log) / kDataPerFunction);
CHECK(false);
}
i::ScriptDataImpl data(log.ExtractData());
@@ -2013,3 +2231,366 @@ TEST(NoErrorsTryCatchFinally) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
+
+
+TEST(ErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/unterminated",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/foo/",
+ "/foo/g",
+ "/foo/whatever", // This is an error but not detected by the parser.
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(Intrinsics) {
+ const char* context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "%someintrinsic(arg)",
+ NULL
+ };
+
+ // Parsing will fail or succeed depending on whether we allow natives syntax
+ // or not.
+ RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+}
+
+
+TEST(NoErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo",
+ "new foo();",
+ "new foo(1);",
+ "new foo(1, 2);",
+ // The first () will be processed as a part of the NewExpression and the
+ // second () will be processed as part of LeftHandSideExpression.
+ "new foo()();",
+ // The first () will be processed as a part of the inner NewExpression and
+ // the second () will be processed as a part of the outer NewExpression.
+ "new new foo()();",
+ "new foo.bar;",
+ "new foo.bar();",
+ "new foo.bar.baz;",
+ "new foo.bar().baz;",
+ "new foo[bar];",
+ "new foo[bar]();",
+ "new foo[bar][baz];",
+ "new foo[bar]()[baz];",
+ "new foo[bar].baz(baz)()[bar].baz;",
+ "new \"foo\"", // Runtime error
+ "new 1", // Runtime error
+ // This even runs:
+ "(new new Function(\"this.x = 1\")).x;",
+ "new new Test_Two(String, 2).v(0123).length;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo bar",
+ "new ) foo",
+ "new ++foo",
+ "new foo ++",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(StrictObjectLiteralChecking) {
+ const char* strict_context_data[][2] = {
+ {"\"use strict\"; var myobject = {", "};"},
+ { NULL, NULL }
+ };
+ const char* non_strict_context_data[][2] = {
+ {"var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ // These are only errors in strict mode.
+ const char* statement_data[] = {
+ "foo: 1, foo: 2",
+ "\"foo\": 1, \"foo\": 2",
+ "foo: 1, \"foo\": 2",
+ "1: 1, 1: 2",
+ "1: 1, \"1\": 2",
+ "get: 1, get: 2", // Not a getter for real, just a property called get.
+ "set: 1, set: 2", // Not a setter for real, just a property called set.
+ NULL
+ };
+
+ RunParserSyncTest(non_strict_context_data, statement_data, kSuccess);
+ RunParserSyncTest(strict_context_data, statement_data, kError);
+}
+
+
+TEST(ErrorsObjectLiteralChecking) {
+ const char* context_data[][2] = {
+ {"\"use strict\"; var myobject = {", "};"},
+ {"var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "foo: 1, get foo() {}",
+ "foo: 1, set foo() {}",
+ "\"foo\": 1, get \"foo\"() {}",
+ "\"foo\": 1, set \"foo\"() {}",
+ "1: 1, get 1() {}",
+ "1: 1, set 1() {}",
+ // It's counter-intuitive, but these collide too (even in classic
+ // mode). Note that we can have "foo" and foo as properties in classic mode,
+ // but we cannot have "foo" and get foo, or foo and get "foo".
+ "foo: 1, get \"foo\"() {}",
+ "foo: 1, set \"foo\"() {}",
+ "\"foo\": 1, get foo() {}",
+ "\"foo\": 1, set foo() {}",
+ "1: 1, get \"1\"() {}",
+ "1: 1, set \"1\"() {}",
+ "\"1\": 1, get 1() {}"
+ "\"1\": 1, set 1() {}"
+ // Parsing FunctionLiteral for getter or setter fails
+ "get foo( +",
+ "get foo() \"error\"",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsObjectLiteralChecking) {
+ const char* context_data[][2] = {
+ {"var myobject = {", "};"},
+ {"\"use strict\"; var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "foo: 1, bar: 2",
+ "\"foo\": 1, \"bar\": 2",
+ "1: 1, 2: 2",
+ // Syntax: IdentifierName ':' AssignmentExpression
+ "foo: bar = 5 + baz",
+ // Syntax: 'get' (IdentifierName | String | Number) FunctionLiteral
+ "get foo() {}",
+ "get \"foo\"() {}",
+ "get 1() {}",
+ // Syntax: 'set' (IdentifierName | String | Number) FunctionLiteral
+ "set foo() {}",
+ "set \"foo\"() {}",
+ "set 1() {}",
+ // Non-colliding getters and setters -> no errors
+ "foo: 1, get bar() {}",
+ "foo: 1, set bar(b) {}",
+ "\"foo\": 1, get \"bar\"() {}",
+ "\"foo\": 1, set \"bar\"() {}",
+ "1: 1, get 2() {}",
+ "1: 1, set 2() {}",
+ // Weird number of parameters -> no errors
+ "get bar() {}, set bar() {}",
+ "get bar(x) {}, set bar(x) {}",
+ "get bar(x, y) {}, set bar(x, y) {}",
+ // Keywords, future reserved and strict future reserved are also allowed as
+ // property names.
+ "if: 4",
+ "interface: 5",
+ "super: 6",
+ "eval: 7",
+ "arguments: 8",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(TooManyArguments) {
+ const char* context_data[][2] = {
+ {"foo(", "0)"},
+ { NULL, NULL }
+ };
+
+ using v8::internal::Code;
+ char statement[Code::kMaxArguments * 2 + 1];
+ for (int i = 0; i < Code::kMaxArguments; ++i) {
+ statement[2 * i] = '0';
+ statement[2 * i + 1] = ',';
+ }
+ statement[Code::kMaxArguments * 2] = 0;
+
+ const char* statement_data[] = {
+ statement,
+ NULL
+ };
+
+ // The test is quite slow, so run it with a reduced set of flags.
+ static const ParserFlag empty_flags[] = {kAllowLazy};
+ RunParserSyncTest(context_data, statement_data, kError, empty_flags, 1);
+}
+
+
+TEST(StrictDelete) {
+ // "delete <Identifier>" is not allowed in strict mode.
+ const char* strict_context_data[][2] = {
+ {"\"use strict\"; ", ""},
+ { NULL, NULL }
+ };
+
+ const char* sloppy_context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ // These are errors in the strict mode.
+ const char* sloppy_statement_data[] = {
+ "delete foo;",
+ "delete foo + 1;",
+ "delete (foo);",
+ "delete eval;",
+ "delete interface;",
+ NULL
+ };
+
+ // These are always OK
+ const char* good_statement_data[] = {
+ "delete this;",
+ "delete 1;",
+ "delete 1 + 2;",
+ "delete foo();",
+ "delete foo.bar;",
+ "delete foo[bar];",
+ "delete foo--;",
+ "delete --foo;",
+ "delete new foo();",
+ "delete new foo(bar);",
+ NULL
+ };
+
+ // These are always errors
+ const char* bad_statement_data[] = {
+ "delete if;",
+ NULL
+ };
+
+ RunParserSyncTest(strict_context_data, sloppy_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess);
+
+ RunParserSyncTest(strict_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess);
+
+ RunParserSyncTest(strict_context_data, bad_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, bad_statement_data, kError);
+}
+
+
+TEST(InvalidLeftHandSide) {
+ const char* assignment_context_data[][2] = {
+ {"", " = 1;"},
+ {"\"use strict\"; ", " = 1;"},
+ { NULL, NULL }
+ };
+
+ const char* prefix_context_data[][2] = {
+ {"++", ";"},
+ {"\"use strict\"; ++", ";"},
+ {NULL, NULL},
+ };
+
+ const char* postfix_context_data[][2] = {
+ {"", "++;"},
+ {"\"use strict\"; ", "++;"},
+ { NULL, NULL }
+ };
+
+ // Good left hand sides for assignment or prefix / postfix operations.
+ const char* good_statement_data[] = {
+ "foo",
+ "foo.bar",
+ "foo[bar]",
+ "foo()[bar]",
+ "foo().bar",
+ "this.foo",
+ "this[foo]",
+ "new foo()[bar]",
+ "new foo().bar",
+ NULL
+ };
+
+ // Bad left hand sides for assignment or prefix / postfix operations.
+ const char* bad_statement_data_common[] = {
+ "2",
+ "foo()",
+ "null",
+ "if", // Unexpected token
+ "{x: 1}", // Unexpected token
+ "this",
+ "\"bar\"",
+ "(foo + bar)",
+ "new new foo()[bar]", // means: new (new foo()[bar])
+ "new new foo().bar", // means: new (new foo().bar)
+ NULL
+ };
+
+ // These are not okay for assignment, but okay for prefix / postfix.
+ const char* bad_statement_data_for_assignment[] = {
+ "++foo",
+ "foo++",
+ "foo + bar",
+ NULL
+ };
+
+ RunParserSyncTest(assignment_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(assignment_context_data, bad_statement_data_common, kError);
+ RunParserSyncTest(assignment_context_data, bad_statement_data_for_assignment,
+ kError);
+
+ RunParserSyncTest(prefix_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(prefix_context_data, bad_statement_data_common, kError);
+
+ RunParserSyncTest(postfix_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(postfix_context_data, bad_statement_data_common, kError);
+}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index eca0ab72a..b9f8bafe4 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -53,6 +53,12 @@ using namespace ::v8::internal;
do { \
ASM("str %%sp, %0" : "=g" (sp_addr)); \
} while (0)
+#elif defined(__AARCH64EL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
+ } while (0)
#elif defined(__MIPSEL__)
#define GET_STACK_POINTER() \
static int sp_addr = 0; \
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index d0193520f..712fec056 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -49,6 +49,11 @@
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
#include "mips/macro-assembler-mips.h"
@@ -444,27 +449,15 @@ static bool NotDigit(uc16 c) {
}
-static bool IsWhiteSpace(uc16 c) {
- switch (c) {
- case 0x09:
- case 0x0A:
- case 0x0B:
- case 0x0C:
- case 0x0d:
- case 0x20:
- case 0xA0:
- case 0x2028:
- case 0x2029:
- case 0xFEFF:
- return true;
- default:
- return unibrow::Space::Is(c);
- }
+static bool IsWhiteSpaceOrLineTerminator(uc16 c) {
+ // According to ECMA 5.1, 15.10.2.12 the CharacterClassEscape \s includes
+ // WhiteSpace (7.2) and LineTerminator (7.3) values.
+ return v8::internal::WhiteSpaceOrLineTerminator::Is(c);
}
-static bool NotWhiteSpace(uc16 c) {
- return !IsWhiteSpace(c);
+static bool NotWhiteSpaceNorLineTermiantor(uc16 c) {
+ return !IsWhiteSpaceOrLineTerminator(c);
}
@@ -494,8 +487,8 @@ TEST(CharacterClassEscapes) {
TestCharacterClassEscapes('.', IsRegExpNewline);
TestCharacterClassEscapes('d', IsDigit);
TestCharacterClassEscapes('D', NotDigit);
- TestCharacterClassEscapes('s', IsWhiteSpace);
- TestCharacterClassEscapes('S', NotWhiteSpace);
+ TestCharacterClassEscapes('s', IsWhiteSpaceOrLineTerminator);
+ TestCharacterClassEscapes('S', NotWhiteSpaceNorLineTermiantor);
TestCharacterClassEscapes('w', IsRegExpWord);
TestCharacterClassEscapes('W', NotWord);
}
@@ -701,6 +694,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_ARM64
+typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 4b31e614d..6ff52003b 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -661,7 +661,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
for (int i = 0; i < test_cases; i++) {
printf("%d\n", i);
HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
// Build flat version of cons string.
Handle<String> flat_string = build(i, &data);
ConsStringStats flat_string_stats;
@@ -1209,24 +1209,17 @@ TEST(AsciiArrayJoin) {
// starting with 'bad', followed by 2^14 times the string s. That means the
// total length of the concatenated strings is 2^31 + 3. So on 32bit systems
// summing the lengths of the strings (as Smis) overflows and wraps.
- static const char* join_causing_out_of_memory =
+ LocalContext context;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::TryCatch try_catch;
+ CHECK(CompileRun(
"var two_14 = Math.pow(2, 14);"
"var two_17 = Math.pow(2, 17);"
"var s = Array(two_17 + 1).join('c');"
"var a = ['bad'];"
"for (var i = 1; i <= two_14; i++) a.push(s);"
- "a.join("");";
-
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- v8::V8::IgnoreOutOfMemoryException();
- v8::Local<v8::Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(CcTest::isolate(), join_causing_out_of_memory));
- v8::Local<v8::Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
+ "a.join("");").IsEmpty());
+ CHECK(try_catch.HasCaught());
}
@@ -1282,23 +1275,6 @@ TEST(RobustSubStringStub) {
}
-TEST(RegExpOverflow) {
- // Result string has the length 2^32, causing a 32-bit integer overflow.
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- v8::V8::IgnoreOutOfMemoryException();
- v8::Local<v8::Value> result = CompileRun(
- "var a = 'a'; "
- "for (var i = 0; i < 16; i++) { "
- " a += a; "
- "} "
- "a.replace(/a/g, a); ");
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
TEST(StringReplaceAtomTwoByteResult) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1376,3 +1352,62 @@ TEST(Latin1IgnoreCase) {
CHECK_EQ(Min(upper, lower), test);
}
}
+
+
+class DummyResource: public v8::String::ExternalStringResource {
+ public:
+ virtual const uint16_t* data() const { return NULL; }
+ virtual size_t length() const { return 1 << 30; }
+};
+
+
+class DummyOneByteResource: public v8::String::ExternalOneByteStringResource {
+ public:
+ virtual const char* data() const { return NULL; }
+ virtual size_t length() const { return 1 << 30; }
+};
+
+
+TEST(InvalidExternalString) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ { HandleScope scope(isolate);
+ DummyOneByteResource r;
+ CHECK(isolate->factory()->NewExternalStringFromAscii(&r).is_null());
+ CHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ }
+
+ { HandleScope scope(isolate);
+ DummyResource r;
+ CHECK(isolate->factory()->NewExternalStringFromTwoByte(&r).is_null());
+ CHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ }
+}
+
+
+#define INVALID_STRING_TEST(FUN, TYPE) \
+ TEST(StringOOM##FUN) { \
+ CcTest::InitializeVM(); \
+ LocalContext context; \
+ Isolate* isolate = CcTest::i_isolate(); \
+ STATIC_ASSERT(String::kMaxLength < kMaxInt); \
+ static const int invalid = String::kMaxLength + 1; \
+ HandleScope scope(isolate); \
+ Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
+ CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
+ memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
+ CHECK(isolate->has_pending_exception()); \
+ isolate->clear_pending_exception(); \
+ dummy.Dispose(); \
+ }
+
+INVALID_STRING_TEST(NewStringFromAscii, char)
+INVALID_STRING_TEST(NewStringFromUtf8, char)
+INVALID_STRING_TEST(NewStringFromOneByte, uint8_t)
+INVALID_STRING_TEST(InternalizeOneByteString, uint8_t)
+INVALID_STRING_TEST(InternalizeUtf8String, char)
+
+#undef INVALID_STRING_TEST
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index a04ffa70c..6fceea613 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -37,7 +37,7 @@ TEST(Create) {
#endif
}
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// All symbols should be distinct.
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index d29ee4110..326bd1b56 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -34,6 +34,8 @@ template<class Type, class TypeHandle, class Region>
class Types {
public:
Types(Region* region, Isolate* isolate) :
+ Representation(Type::Representation(region)),
+ Semantic(Type::Semantic(region)),
None(Type::None(region)),
Any(Type::Any(region)),
Oddball(Type::Oddball(region)),
@@ -41,9 +43,9 @@ class Types {
Null(Type::Null(region)),
Undefined(Type::Undefined(region)),
Number(Type::Number(region)),
- Smi(Type::Smi(region)),
+ SignedSmall(Type::SignedSmall(region)),
Signed32(Type::Signed32(region)),
- Double(Type::Double(region)),
+ Float(Type::Float(region)),
Name(Type::Name(region)),
UniqueName(Type::UniqueName(region)),
String(Type::String(region)),
@@ -72,6 +74,8 @@ class Types {
ArrayConstant2 = Type::Constant(array, region);
}
+ TypeHandle Representation;
+ TypeHandle Semantic;
TypeHandle None;
TypeHandle Any;
TypeHandle Oddball;
@@ -79,9 +83,9 @@ class Types {
TypeHandle Null;
TypeHandle Undefined;
TypeHandle Number;
- TypeHandle Smi;
+ TypeHandle SignedSmall;
TypeHandle Signed32;
- TypeHandle Double;
+ TypeHandle Float;
TypeHandle Name;
TypeHandle UniqueName;
TypeHandle String;
@@ -190,10 +194,10 @@ struct ZoneRep {
return static_cast<int>(reinterpret_cast<intptr_t>(t) >> 1);
}
static Map* AsClass(Type* t) {
- return *reinterpret_cast<Map**>(AsTagged(t)->at(1));
+ return *reinterpret_cast<Map**>(AsTagged(t)->at(2));
}
static Object* AsConstant(Type* t) {
- return *reinterpret_cast<Object**>(AsTagged(t)->at(1));
+ return *reinterpret_cast<Object**>(AsTagged(t)->at(2));
}
static ZoneList<Type*>* AsUnion(Type* t) {
return reinterpret_cast<ZoneList<Type*>*>(AsTagged(t));
@@ -236,7 +240,7 @@ struct Tests : Rep {
T(Rep::ToRegion(&zone, isolate), isolate) {
}
- static void CheckEqual(TypeHandle type1, TypeHandle type2) {
+ void CheckEqual(TypeHandle type1, TypeHandle type2) {
CHECK_EQ(Rep::IsBitset(type1), Rep::IsBitset(type2));
CHECK_EQ(Rep::IsClass(type1), Rep::IsClass(type2));
CHECK_EQ(Rep::IsConstant(type1), Rep::IsConstant(type2));
@@ -256,7 +260,7 @@ struct Tests : Rep {
CHECK(type2->Is(type1));
}
- static void CheckSub(TypeHandle type1, TypeHandle type2) {
+ void CheckSub(TypeHandle type1, TypeHandle type2) {
CHECK(type1->Is(type2));
CHECK(!type2->Is(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
@@ -264,7 +268,7 @@ struct Tests : Rep {
}
}
- static void CheckUnordered(TypeHandle type1, TypeHandle type2) {
+ void CheckUnordered(TypeHandle type1, TypeHandle type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
@@ -272,21 +276,23 @@ struct Tests : Rep {
}
}
- static void CheckOverlap(TypeHandle type1, TypeHandle type2) {
+ void CheckOverlap(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
CHECK(type1->Maybe(type2));
CHECK(type2->Maybe(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_NE(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ CHECK_NE(0,
+ Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
}
}
- static void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
+ void CheckDisjoint(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
CHECK(!type1->Maybe(type2));
CHECK(!type2->Maybe(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_EQ(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ CHECK_EQ(0,
+ Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
}
}
@@ -300,10 +306,12 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Union(T.String, T.Receiver)));
CHECK_EQ(0, this->AsBitset(T.None));
- CHECK_EQ(this->AsBitset(T.Number) | this->AsBitset(T.String),
- this->AsBitset(T.Union(T.String, T.Number)));
- CHECK_EQ(this->AsBitset(T.Receiver),
- this->AsBitset(T.Union(T.Receiver, T.Object)));
+ CHECK_EQ(
+ this->AsBitset(T.Number) | this->AsBitset(T.String),
+ this->AsBitset(T.Union(T.String, T.Number)));
+ CHECK_EQ(
+ this->AsBitset(T.Receiver),
+ this->AsBitset(T.Union(T.Receiver, T.Object)));
}
void Class() {
@@ -352,12 +360,12 @@ struct Tests : Rep {
CheckUnordered(T.Boolean, T.Undefined);
CheckSub(T.Number, T.Any);
- CheckSub(T.Smi, T.Number);
+ CheckSub(T.SignedSmall, T.Number);
CheckSub(T.Signed32, T.Number);
- CheckSub(T.Double, T.Number);
- CheckSub(T.Smi, T.Signed32);
- CheckUnordered(T.Smi, T.Double);
- CheckUnordered(T.Signed32, T.Double);
+ CheckSub(T.Float, T.Number);
+ CheckSub(T.SignedSmall, T.Signed32);
+ CheckUnordered(T.SignedSmall, T.Float);
+ CheckUnordered(T.Signed32, T.Float);
CheckSub(T.Name, T.Any);
CheckSub(T.UniqueName, T.Any);
@@ -391,7 +399,7 @@ struct Tests : Rep {
CheckSub(T.ArrayClass, T.Object);
CheckUnordered(T.ObjectClass, T.ArrayClass);
- CheckSub(T.SmiConstant, T.Smi);
+ CheckSub(T.SmiConstant, T.SignedSmall);
CheckSub(T.SmiConstant, T.Signed32);
CheckSub(T.SmiConstant, T.Number);
CheckSub(T.ObjectConstant1, T.Object);
@@ -409,71 +417,71 @@ struct Tests : Rep {
}
void Maybe() {
- CheckOverlap(T.Any, T.Any);
- CheckOverlap(T.Object, T.Object);
-
- CheckOverlap(T.Oddball, T.Any);
- CheckOverlap(T.Boolean, T.Oddball);
- CheckOverlap(T.Null, T.Oddball);
- CheckOverlap(T.Undefined, T.Oddball);
- CheckDisjoint(T.Boolean, T.Null);
- CheckDisjoint(T.Undefined, T.Null);
- CheckDisjoint(T.Boolean, T.Undefined);
-
- CheckOverlap(T.Number, T.Any);
- CheckOverlap(T.Smi, T.Number);
- CheckOverlap(T.Double, T.Number);
- CheckDisjoint(T.Signed32, T.Double);
-
- CheckOverlap(T.Name, T.Any);
- CheckOverlap(T.UniqueName, T.Any);
- CheckOverlap(T.UniqueName, T.Name);
- CheckOverlap(T.String, T.Name);
- CheckOverlap(T.InternalizedString, T.String);
- CheckOverlap(T.InternalizedString, T.UniqueName);
- CheckOverlap(T.InternalizedString, T.Name);
- CheckOverlap(T.Symbol, T.UniqueName);
- CheckOverlap(T.Symbol, T.Name);
- CheckOverlap(T.String, T.UniqueName);
- CheckDisjoint(T.String, T.Symbol);
- CheckDisjoint(T.InternalizedString, T.Symbol);
-
- CheckOverlap(T.Receiver, T.Any);
- CheckOverlap(T.Object, T.Any);
- CheckOverlap(T.Object, T.Receiver);
- CheckOverlap(T.Array, T.Object);
- CheckOverlap(T.Function, T.Object);
- CheckOverlap(T.Proxy, T.Receiver);
- CheckDisjoint(T.Object, T.Proxy);
- CheckDisjoint(T.Array, T.Function);
-
- CheckOverlap(T.ObjectClass, T.Any);
- CheckOverlap(T.ObjectConstant1, T.Any);
-
- CheckOverlap(T.ObjectClass, T.Object);
- CheckOverlap(T.ArrayClass, T.Object);
- CheckOverlap(T.ObjectClass, T.ObjectClass);
- CheckOverlap(T.ArrayClass, T.ArrayClass);
- CheckDisjoint(T.ObjectClass, T.ArrayClass);
-
- CheckOverlap(T.SmiConstant, T.Smi);
- CheckOverlap(T.SmiConstant, T.Signed32);
- CheckOverlap(T.SmiConstant, T.Number);
- CheckDisjoint(T.SmiConstant, T.Double);
- CheckOverlap(T.ObjectConstant1, T.Object);
- CheckOverlap(T.ObjectConstant2, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Array);
- CheckOverlap(T.ArrayConstant1, T.ArrayConstant2);
- CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
- CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
- CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1);
-
- CheckDisjoint(T.ObjectConstant1, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant2, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
- CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
- CheckDisjoint(T.ArrayConstant1, T.ObjectClass);
+ CheckOverlap(T.Any, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Object, T.Semantic);
+
+ CheckOverlap(T.Oddball, T.Any, T.Semantic);
+ CheckOverlap(T.Boolean, T.Oddball, T.Semantic);
+ CheckOverlap(T.Null, T.Oddball, T.Semantic);
+ CheckOverlap(T.Undefined, T.Oddball, T.Semantic);
+ CheckDisjoint(T.Boolean, T.Null, T.Semantic);
+ CheckDisjoint(T.Undefined, T.Null, T.Semantic);
+ CheckDisjoint(T.Boolean, T.Undefined, T.Semantic);
+
+ CheckOverlap(T.Number, T.Any, T.Semantic);
+ CheckOverlap(T.SignedSmall, T.Number, T.Semantic);
+ CheckOverlap(T.Float, T.Number, T.Semantic);
+ CheckDisjoint(T.Signed32, T.Float, T.Semantic);
+
+ CheckOverlap(T.Name, T.Any, T.Semantic);
+ CheckOverlap(T.UniqueName, T.Any, T.Semantic);
+ CheckOverlap(T.UniqueName, T.Name, T.Semantic);
+ CheckOverlap(T.String, T.Name, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.String, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.UniqueName, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.Name, T.Semantic);
+ CheckOverlap(T.Symbol, T.UniqueName, T.Semantic);
+ CheckOverlap(T.Symbol, T.Name, T.Semantic);
+ CheckOverlap(T.String, T.UniqueName, T.Semantic);
+ CheckDisjoint(T.String, T.Symbol, T.Semantic);
+ CheckDisjoint(T.InternalizedString, T.Symbol, T.Semantic);
+
+ CheckOverlap(T.Receiver, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Receiver, T.Semantic);
+ CheckOverlap(T.Array, T.Object, T.Semantic);
+ CheckOverlap(T.Function, T.Object, T.Semantic);
+ CheckOverlap(T.Proxy, T.Receiver, T.Semantic);
+ CheckDisjoint(T.Object, T.Proxy, T.Semantic);
+ CheckDisjoint(T.Array, T.Function, T.Semantic);
+
+ CheckOverlap(T.ObjectClass, T.Any, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.Any, T.Semantic);
+
+ CheckOverlap(T.ObjectClass, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayClass, T.Object, T.Semantic);
+ CheckOverlap(T.ObjectClass, T.ObjectClass, T.Semantic);
+ CheckOverlap(T.ArrayClass, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass, T.Semantic);
+
+ CheckOverlap(T.SmiConstant, T.SignedSmall, T.Semantic);
+ CheckOverlap(T.SmiConstant, T.Signed32, T.Semantic);
+ CheckOverlap(T.SmiConstant, T.Number, T.Semantic);
+ CheckDisjoint(T.SmiConstant, T.Float, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.Object, T.Semantic);
+ CheckOverlap(T.ObjectConstant2, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.Array, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.ArrayConstant2, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1, T.Semantic);
+
+ CheckDisjoint(T.ObjectConstant1, T.ObjectClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant2, T.ObjectClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant2, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ArrayConstant1, T.ObjectClass, T.Semantic);
}
void Union() {
@@ -498,8 +506,8 @@ struct Tests : Rep {
CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number, T.Semantic);
// Constant-constant
CHECK(this->IsConstant(T.Union(T.ObjectConstant1, T.ObjectConstant1)));
@@ -520,11 +528,16 @@ struct Tests : Rep {
CheckUnordered(
T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
CheckOverlap(
- T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass);
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array, T.Semantic);
+ CheckOverlap(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2,
+ T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number, T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass,
+ T.Semantic);
// Bitset-class
CHECK(this->IsBitset(T.Union(T.ObjectClass, T.Object)));
@@ -533,11 +546,12 @@ struct Tests : Rep {
CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
CheckSub(T.None, T.Union(T.ObjectClass, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
- CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
+ CheckSub(
+ T.Union(T.ObjectClass, T.SignedSmall), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number, T.Semantic);
// Bitset-constant
CHECK(this->IsBitset(T.Union(T.SmiConstant, T.Number)));
@@ -552,8 +566,8 @@ struct Tests : Rep {
T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number, T.Semantic);
CheckEqual(T.Union(T.Signed32, T.Signed32Constant), T.Signed32);
// Class-constant
@@ -569,8 +583,11 @@ struct Tests : Rep {
CheckSub(
T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant1);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2,
+ T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass, T.Semantic);
// Bitset-union
CHECK(this->IsBitset(
@@ -585,19 +602,19 @@ struct Tests : Rep {
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
CheckSub(
- T.Double,
+ T.Float,
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
CheckSub(
T.ObjectConstant1,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float));
CheckSub(
T.None,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float));
CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float),
T.Any);
CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float),
T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
// Class-union
@@ -661,7 +678,9 @@ struct Tests : Rep {
T.Union(T.ObjectConstant1, T.ObjectConstant2),
T.ArrayConstant1));
CheckEqual(
- T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
+ T.Union(
+ T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Array)),
T.Union(T.Number, T.Array));
}
@@ -672,7 +691,7 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Intersect(T.Any, T.None)));
CheckEqual(T.Intersect(T.None, T.Number), T.None);
- CheckEqual(T.Intersect(T.Object, T.Proxy), T.None);
+ CheckSub(T.Intersect(T.Object, T.Proxy), T.Representation);
CheckEqual(T.Intersect(T.Name, T.String), T.Intersect(T.String, T.Name));
CheckEqual(T.Intersect(T.UniqueName, T.String), T.InternalizedString);
@@ -699,15 +718,15 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Intersect(T.ObjectClass, T.Number)));
CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
- CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
+ CheckSub(T.Intersect(T.ObjectClass, T.Array), T.Representation);
+ CheckSub(T.Intersect(T.ObjectClass, T.Number), T.Representation);
// Bitset-constant
- CHECK(this->IsBitset(T.Intersect(T.Smi, T.Number)));
+ CHECK(this->IsBitset(T.Intersect(T.SignedSmall, T.Number)));
CHECK(this->IsConstant(T.Intersect(T.SmiConstant, T.Number)));
CHECK(this->IsConstant(T.Intersect(T.ObjectConstant1, T.Object)));
- CheckEqual(T.Intersect(T.Smi, T.Number), T.Smi);
+ CheckEqual(T.Intersect(T.SignedSmall, T.Number), T.SignedSmall);
CheckEqual(T.Intersect(T.SmiConstant, T.Number), T.SmiConstant);
CheckEqual(T.Intersect(T.ObjectConstant1, T.Object), T.ObjectConstant1);
@@ -778,8 +797,8 @@ struct Tests : Rep {
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ArrayClass),
- T.Union(T.Smi, T.Array)),
- T.Union(T.Smi, T.ArrayClass));
+ T.Union(T.SignedSmall, T.Array)),
+ T.Union(T.SignedSmall, T.ArrayClass));
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ObjectClass),
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
new file mode 100644
index 000000000..9eb32b002
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -0,0 +1,425 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+#include "test-utils-arm64.h"
+
+using namespace v8::internal;
+
+
+#define __ masm->
+
+
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
+ if (result != expected) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
+ if (result != expected) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool EqualFP32(float expected, const RegisterDump*, float result) {
+ if (float_to_rawbits(expected) == float_to_rawbits(result)) {
+ return true;
+ } else {
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ float_to_rawbits(expected), float_to_rawbits(result));
+ } else {
+ printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ "Found %.9f (0x%08" PRIx32 ")\n",
+ expected, float_to_rawbits(expected),
+ result, float_to_rawbits(result));
+ }
+ return false;
+ }
+}
+
+
+bool EqualFP64(double expected, const RegisterDump*, double result) {
+ if (double_to_rawbits(expected) == double_to_rawbits(result)) {
+ return true;
+ }
+
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ double_to_rawbits(expected), double_to_rawbits(result));
+ } else {
+ printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ "Found %.17f (0x%016" PRIx64 ")\n",
+ expected, double_to_rawbits(expected),
+ result, double_to_rawbits(result));
+ }
+ return false;
+}
+
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
+ ASSERT(reg.Is32Bits());
+ // Retrieve the corresponding X register so we can check that the upper part
+ // was properly cleared.
+ int64_t result_x = core->xreg(reg.code());
+ if ((result_x & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
+ expected, result_x);
+ return false;
+ }
+ uint32_t result_w = core->wreg(reg.code());
+ return Equal32(expected, core, result_w);
+}
+
+
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
+ const Register& reg) {
+ ASSERT(reg.Is64Bits());
+ uint64_t result = core->xreg(reg.code());
+ return Equal64(expected, core, result);
+}
+
+
+bool EqualFP32(float expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is32Bits());
+ // Retrieve the corresponding D register so we can check that the upper part
+ // was properly cleared.
+ uint64_t result_64 = core->dreg_bits(fpreg.code());
+ if ((result_64 & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
+ float_to_rawbits(expected), expected, result_64);
+ return false;
+ }
+
+ return EqualFP32(expected, core, core->sreg(fpreg.code()));
+}
+
+
+bool EqualFP64(double expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is64Bits());
+ return EqualFP64(expected, core, core->dreg(fpreg.code()));
+}
+
+
+bool Equal64(const Register& reg0,
+ const RegisterDump* core,
+ const Register& reg1) {
+ ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
+ int64_t expected = core->xreg(reg0.code());
+ int64_t result = core->xreg(reg1.code());
+ return Equal64(expected, core, result);
+}
+
+
+static char FlagN(uint32_t flags) {
+ return (flags & NFlag) ? 'N' : 'n';
+}
+
+
+static char FlagZ(uint32_t flags) {
+ return (flags & ZFlag) ? 'Z' : 'z';
+}
+
+
+static char FlagC(uint32_t flags) {
+ return (flags & CFlag) ? 'C' : 'c';
+}
+
+
+static char FlagV(uint32_t flags) {
+ return (flags & VFlag) ? 'V' : 'v';
+}
+
+
+bool EqualNzcv(uint32_t expected, uint32_t result) {
+ ASSERT((expected & ~NZCVFlag) == 0);
+ ASSERT((result & ~NZCVFlag) == 0);
+ if (result != expected) {
+ printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
+ FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
+ FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ return false;
+ }
+
+ return true;
+}
+
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (a->xreg(i) != b->xreg(i)) {
+ printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a->xreg(i), b->xreg(i));
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ uint64_t a_bits = a->dreg_bits(i);
+ uint64_t b_bits = b->dreg_bits(i);
+ if (a_bits != b_bits) {
+ printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a_bits, b_bits);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assign allowed registers.
+ if (r) {
+ r[i] = Register::Create(n, reg_size);
+ }
+ if (x) {
+ x[i] = Register::Create(n, kXRegSizeInBits);
+ }
+ if (w) {
+ w[i] = Register::Create(n, kWRegSizeInBits);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
+
+ return list;
+}
+
+
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assigned allowed registers.
+ if (v) {
+ v[i] = FPRegister::Create(n, reg_size);
+ }
+ if (d) {
+ d[i] = FPRegister::Create(n, kDRegSizeInBits);
+ }
+ if (s) {
+ s[i] = FPRegister::Create(n, kSRegSizeInBits);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+
+ return list;
+}
+
+
+void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
+ Register first = NoReg;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ Register xn = Register::Create(i, kXRegSizeInBits);
+ // We should never write into csp here.
+ ASSERT(!xn.Is(csp));
+ if (!xn.IsZero()) {
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Mov(xn, value);
+ first = xn;
+ } else {
+ // We've already loaded the literal, so re-use the value already
+ // loaded into the first register we hit.
+ __ Mov(xn, first);
+ }
+ }
+ }
+ }
+}
+
+
+void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
+ FPRegister first = NoFPReg;
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ FPRegister dn = FPRegister::Create(i, kDRegSizeInBits);
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Fmov(dn, value);
+ first = dn;
+ } else {
+ // We've already loaded the literal, so re-use the value already loaded
+ // into the first register we hit.
+ __ Fmov(dn, first);
+ }
+ }
+ }
+}
+
+
+void Clobber(MacroAssembler* masm, CPURegList reg_list) {
+ if (reg_list.type() == CPURegister::kRegister) {
+ // This will always clobber X registers.
+ Clobber(masm, reg_list.list());
+ } else if (reg_list.type() == CPURegister::kFPRegister) {
+ // This will always clobber D registers.
+ ClobberFP(masm, reg_list.list());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void RegisterDump::Dump(MacroAssembler* masm) {
+ ASSERT(__ StackPointer().Is(csp));
+
+ // Ensure that we don't unintentionally clobber any registers.
+ RegList old_tmp_list = masm->TmpList()->list();
+ RegList old_fptmp_list = masm->FPTmpList()->list();
+ masm->TmpList()->set_list(0);
+ masm->FPTmpList()->set_list(0);
+
+ // Preserve some temporary registers.
+ Register dump_base = x0;
+ Register dump = x1;
+ Register tmp = x2;
+ Register dump_base_w = dump_base.W();
+ Register dump_w = dump.W();
+ Register tmp_w = tmp.W();
+
+ // Offsets into the dump_ structure.
+ const int x_offset = offsetof(dump_t, x_);
+ const int w_offset = offsetof(dump_t, w_);
+ const int d_offset = offsetof(dump_t, d_);
+ const int s_offset = offsetof(dump_t, s_);
+ const int sp_offset = offsetof(dump_t, sp_);
+ const int wsp_offset = offsetof(dump_t, wsp_);
+ const int flags_offset = offsetof(dump_t, flags_);
+
+ __ Push(xzr, dump_base, dump, tmp);
+
+ // Load the address where we will dump the state.
+ __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
+
+ // Dump the stack pointer (csp and wcsp).
+ // The stack pointer cannot be stored directly; it needs to be moved into
+ // another register first. Also, we pushed four X registers, so we need to
+ // compensate here.
+ __ Add(tmp, csp, 4 * kXRegSize);
+ __ Str(tmp, MemOperand(dump_base, sp_offset));
+ __ Add(tmp_w, wcsp, 4 * kXRegSize);
+ __ Str(tmp_w, MemOperand(dump_base, wsp_offset));
+
+ // Dump X registers.
+ __ Add(dump, dump_base, x_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
+ MemOperand(dump, i * kXRegSize));
+ }
+
+ // Dump W registers.
+ __ Add(dump, dump_base, w_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
+ MemOperand(dump, i * kWRegSize));
+ }
+
+ // Dump D registers.
+ __ Add(dump, dump_base, d_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
+ MemOperand(dump, i * kDRegSize));
+ }
+
+ // Dump S registers.
+ __ Add(dump, dump_base, s_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
+ MemOperand(dump, i * kSRegSize));
+ }
+
+ // Dump the flags.
+ __ Mrs(tmp, NZCV);
+ __ Str(tmp, MemOperand(dump_base, flags_offset));
+
+ // To dump the values that were in tmp amd dump, we need a new scratch
+ // register. We can use any of the already dumped registers since we can
+ // easily restore them.
+ Register dump2_base = x10;
+ Register dump2 = x11;
+ ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+
+ // Don't lose the dump_ address.
+ __ Mov(dump2_base, dump_base);
+
+ __ Pop(tmp, dump, dump_base, xzr);
+
+ __ Add(dump2, dump2_base, w_offset);
+ __ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSize));
+ __ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSize));
+ __ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSize));
+
+ __ Add(dump2, dump2_base, x_offset);
+ __ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSize));
+ __ Str(dump, MemOperand(dump2, dump.code() * kXRegSize));
+ __ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSize));
+
+ // Finally, restore dump2_base and dump2.
+ __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSize));
+ __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSize));
+
+ // Restore the MacroAssembler's scratch registers.
+ masm->TmpList()->set_list(old_tmp_list);
+ masm->FPTmpList()->set_list(old_fptmp_list);
+
+ completed_ = true;
+}
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
new file mode 100644
index 000000000..2ff26e49c
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
+#define V8_ARM64_TEST_UTILS_ARM64_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+
+// RegisterDump: Object allowing integer, floating point and flags registers
+// to be saved to itself for future reference.
+class RegisterDump {
+ public:
+ RegisterDump() : completed_(false) {}
+
+ // The Dump method generates code to store a snapshot of the register values.
+ // It needs to be able to use the stack temporarily, and requires that the
+ // current stack pointer is csp, and is properly aligned.
+ //
+ // The dumping code is generated through the given MacroAssembler. No registers
+ // are corrupted in the process, but the stack is used briefly. The flags will
+ // be corrupted during this call.
+ void Dump(MacroAssembler* assm);
+
+ // Register accessors.
+ inline int32_t wreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return wspreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.w_[code];
+ }
+
+ inline int64_t xreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return spreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.x_[code];
+ }
+
+ // FPRegister accessors.
+ inline uint32_t sreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.s_[code];
+ }
+
+ inline float sreg(unsigned code) const {
+ return rawbits_to_float(sreg_bits(code));
+ }
+
+ inline uint64_t dreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.d_[code];
+ }
+
+ inline double dreg(unsigned code) const {
+ return rawbits_to_double(dreg_bits(code));
+ }
+
+ // Stack pointer accessors.
+ inline int64_t spreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.sp_;
+ }
+
+ inline int64_t wspreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.wsp_;
+ }
+
+ // Flags accessors.
+ inline uint64_t flags_nzcv() const {
+ ASSERT(IsComplete());
+ ASSERT((dump_.flags_ & ~Flags_mask) == 0);
+ return dump_.flags_ & Flags_mask;
+ }
+
+ inline bool IsComplete() const {
+ return completed_;
+ }
+
+ private:
+ // Indicate whether the dump operation has been completed.
+ bool completed_;
+
+ // Check that the lower 32 bits of x<code> exactly match the 32 bits of
+ // w<code>. A failure of this test most likely represents a failure in the
+ // ::Dump method, or a failure in the simulator.
+ bool RegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfRegisters);
+ return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
+ }
+
+ // As RegAliasesMatch, but for the stack pointer.
+ bool SPRegAliasesMatch() const {
+ ASSERT(IsComplete());
+ return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
+ }
+
+ // As RegAliasesMatch, but for floating-point registers.
+ bool FPRegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfFPRegisters);
+ return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
+ }
+
+ // Store all the dumped elements in a simple struct so the implementation can
+ // use offsetof to quickly find the correct field.
+ struct dump_t {
+ // Core registers.
+ uint64_t x_[kNumberOfRegisters];
+ uint32_t w_[kNumberOfRegisters];
+
+ // Floating-point registers, as raw bits.
+ uint64_t d_[kNumberOfFPRegisters];
+ uint32_t s_[kNumberOfFPRegisters];
+
+ // The stack pointer.
+ uint64_t sp_;
+ uint64_t wsp_;
+
+ // NZCV flags, stored in bits 28 to 31.
+ // bit[31] : Negative
+ // bit[30] : Zero
+ // bit[29] : Carry
+ // bit[28] : oVerflow
+ uint64_t flags_;
+ } dump_;
+
+ static dump_t for_sizeof();
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSize);
+};
+
+// Some of these methods don't use the RegisterDump argument, but they have to
+// accept them so that they can overload those that take register arguments.
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);
+
+bool EqualFP32(float expected, const RegisterDump*, float result);
+bool EqualFP64(double expected, const RegisterDump*, double result);
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
+bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
+
+bool EqualFP32(float expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+bool EqualFP64(double expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+
+bool Equal64(const Register& reg0, const RegisterDump* core,
+ const Register& reg1);
+
+bool EqualNzcv(uint32_t expected, uint32_t result);
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+
+// Populate the w, x and r arrays with registers from the 'allowed' mask. The
+// r array will be populated with <reg_size>-sized registers.
+//
+// This allows for tests which use large, parameterized blocks of registers
+// (such as the push and pop tests), but where certain registers must be
+// avoided as they are used for other purposes.
+//
+// Any of w, x, or r can be NULL if they are not required.
+//
+// The return value is a RegList indicating which registers were allocated.
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed);
+
+// As PopulateRegisterArray, but for floating-point registers.
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed);
+
+// Overwrite the contents of the specified registers. This enables tests to
+// check that register contents are written in cases where it's likely that the
+// correct outcome could already be stored in the register.
+//
+// This always overwrites X-sized registers. If tests are operating on W
+// registers, a subsequent write into an aliased W register should clear the
+// top word anyway, so clobbering the full X registers should make tests more
+// rigorous.
+void Clobber(MacroAssembler* masm, RegList reg_list,
+ uint64_t const value = 0xfedcba9876543210UL);
+
+// As Clobber, but for FP registers.
+void ClobberFP(MacroAssembler* masm, RegList reg_list,
+ double const value = kFP64SignallingNaN);
+
+// As Clobber, but for a CPURegList with either FP or integer registers. When
+// using this method, the clobber value is always the default for the basic
+// Clobber or ClobberFP functions.
+void Clobber(MacroAssembler* masm, CPURegList reg_list);
+
+#endif // V8_ARM64_TEST_UTILS_ARM64_H_
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 4ab5ab5b7..bd93450a9 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -38,8 +38,12 @@ class CcTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(CcTestSuite, self).__init__(name, root)
+ if utils.IsWindows():
+ build_dir = "build"
+ else:
+ build_dir = "out"
self.serdes_dir = os.path.normpath(
- os.path.join(root, "..", "..", "out", ".serdes"))
+ os.path.join(root, "..", "..", build_dir, ".serdes"))
if os.path.exists(self.serdes_dir):
shutil.rmtree(self.serdes_dir, True)
os.makedirs(self.serdes_dir)