// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "v8.h"

#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "heap-profiler.h"
#include "hydrogen.h"
#include "isolate.h"
#include "lithium-allocator.h"
#include "log.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "serialize.h"
#include "simulator.h"
#include "spaces.h"
#include "stub-cache.h"
#include "version.h"
#include "vm-state-inl.h"

namespace v8 {
namespace internal {

Atomic32 ThreadId::highest_thread_id_ = 0;

int ThreadId::AllocateThreadId() {
  int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
  return new_id;
}


int ThreadId::GetCurrentThreadId() {
  int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
  if (thread_id == 0) {
    thread_id = AllocateThreadId();
    Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
  }
  return thread_id;
}


ThreadLocalTop::ThreadLocalTop() {
  InitializeInternal();
  // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
  // before an isolate is initialized. The initialize methods below do
  // not touch it to preserve its value.
  ignore_out_of_memory_ = false;
}


void ThreadLocalTop::InitializeInternal() {
  c_entry_fp_ = 0;
  handler_ = 0;
#ifdef USE_SIMULATOR
  simulator_ = NULL;
#endif
  js_entry_sp_ = NULL;
  external_callback_ = NULL;
  current_vm_state_ = EXTERNAL;
  try_catch_handler_address_ = NULL;
  context_ = NULL;
  thread_id_ = ThreadId::Invalid();
  external_caught_exception_ = false;
  failed_access_check_callback_ = NULL;
  save_context_ = NULL;
  catcher_ = NULL;
  top_lookup_result_ = NULL;

  // These members are re-initialized later after deserialization
  // is complete.
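  // The exception slots below can end up holding Failure sentinels as well
  // as real heap objects, which is why Isolate::Iterate() further down only
  // reports them to the GC when ToObject() succeeds.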
  pending_exception_ = NULL;
  has_pending_message_ = false;
  pending_message_obj_ = NULL;
  pending_message_script_ = NULL;
  scheduled_exception_ = NULL;
}


void ThreadLocalTop::Initialize() {
  InitializeInternal();
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
  simulator_ = Simulator::current(isolate_);
#elif V8_TARGET_ARCH_MIPS
  simulator_ = Simulator::current(isolate_);
#endif
#endif
  thread_id_ = ThreadId::Current();
}


v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
}


// Create a dummy thread that will wait forever on a semaphore. The only
// purpose for this thread is to have some stack area to save essential data
// into for use by a stacks only core dump (aka minidump).
class PreallocatedMemoryThread: public Thread {
 public:
  char* data() {
    if (data_ready_semaphore_ != NULL) {
      // Initial access is guarded until the data has been published.
      data_ready_semaphore_->Wait();
      delete data_ready_semaphore_;
      data_ready_semaphore_ = NULL;
    }
    return data_;
  }

  unsigned length() {
    if (data_ready_semaphore_ != NULL) {
      // Initial access is guarded until the data has been published.
      data_ready_semaphore_->Wait();
      delete data_ready_semaphore_;
      data_ready_semaphore_ = NULL;
    }
    return length_;
  }

  // Stop the PreallocatedMemoryThread and release its resources.
  void StopThread() {
    keep_running_ = false;
    wait_for_ever_semaphore_->Signal();

    // Wait for the thread to terminate.
    Join();

    if (data_ready_semaphore_ != NULL) {
      delete data_ready_semaphore_;
      data_ready_semaphore_ = NULL;
    }

    delete wait_for_ever_semaphore_;
    wait_for_ever_semaphore_ = NULL;
  }

 protected:
  // When the thread starts running it will allocate a fixed number of bytes
  // on the stack and publish the location of this memory for others to use.
  void Run() {
    // NOTE: the buffer size used here is an assumption; it only needs to be
    // large enough for the messages written into the buffer below.
    EmbeddedVector<char, 32 * KB> local_buffer;

    // Initialize the buffer with a known good value.
    OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
                local_buffer.length());

    // Publish the local buffer and signal its availability.
    data_ = local_buffer.start();
    length_ = local_buffer.length();
    data_ready_semaphore_->Signal();

    while (keep_running_) {
      // This thread will wait here until the end of time.
      wait_for_ever_semaphore_->Wait();
    }

    // Make sure we access the buffer after the wait to remove all possibility
    // of it being optimized away.
    OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
                local_buffer.length());
  }

 private:
  PreallocatedMemoryThread()
      : Thread("v8:PreallocMem"),
        keep_running_(true),
        wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
        data_ready_semaphore_(OS::CreateSemaphore(0)),
        data_(NULL),
        length_(0) {
  }

  // Used to make sure that the thread keeps looping even for spurious wakeups.
  bool keep_running_;
  // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
  Semaphore* wait_for_ever_semaphore_;
  // Semaphore to signal that the data has been initialized.
  Semaphore* data_ready_semaphore_;

  // Location and size of the preallocated memory block.
  char* data_;
  unsigned length_;

  friend class Isolate;

  DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
};


void Isolate::PreallocatedMemoryThreadStart() {
  if (preallocated_memory_thread_ != NULL) return;
  preallocated_memory_thread_ = new PreallocatedMemoryThread();
  preallocated_memory_thread_->Start();
}


void Isolate::PreallocatedMemoryThreadStop() {
  if (preallocated_memory_thread_ == NULL) return;
  preallocated_memory_thread_->StopThread();
  // Done with the thread entirely.
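  // StopThread() has already joined the thread, so deleting the thread
  // object here cannot race with its Run() method.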
  delete preallocated_memory_thread_;
  preallocated_memory_thread_ = NULL;
}


void Isolate::PreallocatedStorageInit(size_t size) {
  ASSERT(free_list_.next_ == &free_list_);
  ASSERT(free_list_.previous_ == &free_list_);
  PreallocatedStorage* free_chunk =
      reinterpret_cast<PreallocatedStorage*>(new char[size]);
  free_list_.next_ = free_list_.previous_ = free_chunk;
  free_chunk->next_ = free_chunk->previous_ = &free_list_;
  free_chunk->size_ = size - sizeof(PreallocatedStorage);
  preallocated_storage_preallocated_ = true;
}


void* Isolate::PreallocatedStorageNew(size_t size) {
  if (!preallocated_storage_preallocated_) {
    return FreeStoreAllocationPolicy().New(size);
  }
  ASSERT(free_list_.next_ != &free_list_);
  ASSERT(free_list_.previous_ != &free_list_);

  size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
  // Search for exact fit.
  for (PreallocatedStorage* storage = free_list_.next_;
       storage != &free_list_;
       storage = storage->next_) {
    if (storage->size_ == size) {
      storage->Unlink();
      storage->LinkTo(&in_use_list_);
      return reinterpret_cast<void*>(storage + 1);
    }
  }
  // Search for first fit.
  for (PreallocatedStorage* storage = free_list_.next_;
       storage != &free_list_;
       storage = storage->next_) {
    if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
      storage->Unlink();
      storage->LinkTo(&in_use_list_);
      PreallocatedStorage* left_over =
          reinterpret_cast<PreallocatedStorage*>(
              reinterpret_cast<char*>(storage + 1) + size);
      left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
      ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
             storage->size_);
      storage->size_ = size;
      left_over->LinkTo(&free_list_);
      return reinterpret_cast<void*>(storage + 1);
    }
  }
  // Allocation failure.
  ASSERT(false);
  return NULL;
}


// We don't attempt to coalesce.
void Isolate::PreallocatedStorageDelete(void* p) {
  if (p == NULL) {
    return;
  }
  if (!preallocated_storage_preallocated_) {
    FreeStoreAllocationPolicy::Delete(p);
    return;
  }
  PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
  ASSERT(storage->next_->previous_ == storage);
  ASSERT(storage->previous_->next_ == storage);
  storage->Unlink();
  storage->LinkTo(&free_list_);
}


Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;


Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
    ThreadId thread_id) {
  ASSERT(!thread_id.Equals(ThreadId::Invalid()));
  PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
  {
    ScopedLock lock(process_wide_mutex_);
    ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
    thread_data_table_->Insert(per_thread);
    ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
  }
  return per_thread;
}


Isolate::PerIsolateThreadData*
    Isolate::FindOrAllocatePerThreadDataForThisThread() {
  ThreadId thread_id = ThreadId::Current();
  PerIsolateThreadData* per_thread = NULL;
  {
    ScopedLock lock(process_wide_mutex_);
    per_thread = thread_data_table_->Lookup(this, thread_id);
    if (per_thread == NULL) {
      per_thread = AllocatePerIsolateThreadData(thread_id);
    }
  }
  return per_thread;
}


Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
  ThreadId thread_id = ThreadId::Current();
  PerIsolateThreadData* per_thread = NULL;
  {
    ScopedLock lock(process_wide_mutex_);
    per_thread = thread_data_table_->Lookup(this, thread_id);
  }
  return per_thread;
}


void Isolate::EnsureDefaultIsolate() {
  ScopedLock lock(process_wide_mutex_);
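  // The process-wide mutex serializes lazy creation of the thread-local
  // keys, the thread data table and the default isolate across threads.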
  if (default_isolate_ == NULL) {
    isolate_key_ = Thread::CreateThreadLocalKey();
    thread_id_key_ = Thread::CreateThreadLocalKey();
    per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
    thread_data_table_ = new Isolate::ThreadDataTable();
    default_isolate_ = new Isolate();
  }
  // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
  // because a non-null thread data may be already set.
  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
    Thread::SetThreadLocal(isolate_key_, default_isolate_);
  }
}

struct StaticInitializer {
  StaticInitializer() {
    Isolate::EnsureDefaultIsolate();
  }
} static_initializer;

#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* Isolate::GetDefaultIsolateDebugger() {
  EnsureDefaultIsolate();
  return default_isolate_->debugger();
}
#endif


StackGuard* Isolate::GetDefaultIsolateStackGuard() {
  EnsureDefaultIsolate();
  return default_isolate_->stack_guard();
}


void Isolate::EnterDefaultIsolate() {
  EnsureDefaultIsolate();
  ASSERT(default_isolate_ != NULL);

  PerIsolateThreadData* data = CurrentPerIsolateThreadData();
  // If not yet in default isolate - enter it.
  if (data == NULL || data->isolate() != default_isolate_) {
    default_isolate_->Enter();
  }
}


Isolate* Isolate::GetDefaultIsolateForLocking() {
  EnsureDefaultIsolate();
  return default_isolate_;
}


Address Isolate::get_address_from_id(Isolate::AddressId id) {
  return isolate_addresses_[id];
}


char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
  Iterate(v, thread);
  return thread_storage + sizeof(ThreadLocalTop);
}


void Isolate::IterateThread(ThreadVisitor* v) {
  v->VisitThread(this, thread_local_top());
}


void Isolate::IterateThread(ThreadVisitor* v, char* t) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
  v->VisitThread(this, thread);
}


void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
  // Visit the roots from the top for a given thread.
  Object* pending;
  // The pending exception can sometimes be a failure.  We can't show
  // that to the GC, which only understands objects.
  if (thread->pending_exception_->ToObject(&pending)) {
    v->VisitPointer(&pending);
    thread->pending_exception_ = pending;  // In case GC updated it.
  }
  v->VisitPointer(&(thread->pending_message_obj_));
  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
  Object* scheduled;
  if (thread->scheduled_exception_->ToObject(&scheduled)) {
    v->VisitPointer(&scheduled);
    thread->scheduled_exception_ = scheduled;
  }

  for (v8::TryCatch* block = thread->TryCatchHandler();
       block != NULL;
       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
    v->VisitPointer(BitCast<Object**>(&(block->message_)));
  }

  // Iterate over pointers on native execution stack.
  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
    it.frame()->Iterate(v);
  }

  // Iterate pointers in live lookup results.
  thread->top_lookup_result_->Iterate(v);
}


void Isolate::Iterate(ObjectVisitor* v) {
  ThreadLocalTop* current_t = thread_local_top();
  Iterate(v, current_t);
}


void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
  for (DeferredHandles* deferred = deferred_handles_head_;
       deferred != NULL;
       deferred = deferred->next_) {
    deferred->Iterate(visitor);
  }
}

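// TryCatch handlers form a per-thread chain: only the innermost handler's
// address lives in the ThreadLocalTop, and outer handlers are reached
// through TryCatch::next_ (see Isolate::Iterate above).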
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
  // The ARM simulator has a separate JS stack.  We therefore register
  // the C++ try catch handler with the simulator and get back an
  // address that can be used for comparisons with addresses into the
  // JS stack.  When running without the simulator, the address
  // returned will be the address of the C++ try catch handler itself.
  Address address = reinterpret_cast<Address>(
      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
  thread_local_top()->set_try_catch_handler_address(address);
}


void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
  ASSERT(thread_local_top()->TryCatchHandler() == that);
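  // Unlink this handler by restoring the address of the next (outer)
  // handler, which the TryCatch recorded in next_ when it was set up.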
  thread_local_top()->set_try_catch_handler_address(
      reinterpret_cast<Address>(that->next_));
  thread_local_top()->catcher_ = NULL;
  SimulatorStack::UnregisterCTryCatch();
}


Handle<String> Isolate::StackTraceString() {
  if (stack_trace_nesting_level_ == 0) {
    stack_trace_nesting_level_++;
    HeapStringAllocator allocator;
    StringStream::ClearMentionedObjectCache();
    StringStream accumulator(&allocator);
    incomplete_message_ = &accumulator;
    PrintStack(&accumulator);
    Handle<String> stack_trace = accumulator.ToString();
    incomplete_message_ = NULL;
    stack_trace_nesting_level_ = 0;
    return stack_trace;
  } else if (stack_trace_nesting_level_ == 1) {
    stack_trace_nesting_level_++;
    OS::PrintError(
      "\n\nAttempt to print stack while printing stack (double fault)\n");
    OS::PrintError(
      "If you are lucky you may find a partial stack dump on stdout.\n\n");
    incomplete_message_->OutputToStdOut();
    return factory()->empty_symbol();
  } else {
    OS::Abort();  // Unreachable
    return factory()->empty_symbol();
  }
}


void Isolate::PushStackTraceAndDie(unsigned int magic,
                                   Object* object,
                                   Map* map,
                                   unsigned int magic2) {
  const int kMaxStackTraceSize = 8192;
  Handle<String> trace = StackTraceString();
  char buffer[kMaxStackTraceSize];
  int length = Min(kMaxStackTraceSize - 1, trace->length());
  String::WriteToFlat(*trace, buffer, 0, length);
  buffer[length] = '\0';
  OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
                 magic, magic2,
                 static_cast<void*>(object), static_cast<void*>(map),
                 buffer);
  OS::Abort();
}


void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
  if (capture_stack_trace_for_uncaught_exceptions_) {
    // Capture stack trace for a detailed exception message.
    Handle<String> key = factory()->hidden_stack_trace_symbol();
    Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
        stack_trace_for_uncaught_exceptions_frame_limit_,
        stack_trace_for_uncaught_exceptions_options_);
    JSObject::SetHiddenProperty(error_object, key, stack_trace);
  }
}


Handle<JSArray> Isolate::CaptureCurrentStackTrace(
    int frame_limit, StackTrace::StackTraceOptions options) {
  // Ensure no negative values.
  int limit = Max(frame_limit, 0);
  Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);

  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
  Handle<String> name_or_source_url_key =
      factory()->LookupAsciiSymbol("nameOrSourceURL");
  Handle<String> script_name_or_source_url_key =
      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
  Handle<String> constructor_key =
      factory()->LookupAsciiSymbol("isConstructor");

  StackTraceFrameIterator it(this);
  int frames_seen = 0;
  while (!it.done() && (frames_seen < limit)) {
    JavaScriptFrame* frame = it.frame();
    // Set initial size to the maximum inlining level + 1 for the outermost
    // function.
    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
    frame->Summarize(&frames);
    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
      // Create a JSObject to hold the information for the StackFrame.
      Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());

      Handle<JSFunction> fun = frames[i].function();
      Handle