// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Notes about usage of this object by VideoCaptureImplManager.
//
// VideoCaptureImplManager accesses this object using a Unretained() binding
// and tasks on the IO thread. It is therefore important that VideoCaptureImpl
// never posts tasks to itself. All operations must be synchronous.

#include "third_party/blink/renderer/platform/video_capture/video_capture_impl.h"

#include <stddef.h>
#include <stdint.h>

#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
#include "media/capture/mojom/video_capture_types.mojom-blink.h"
#include "media/video/gpu_video_accelerator_factories.h"
#include "mojo/public/cpp/system/platform_handle.h"
#include "third_party/blink/public/common/thread_safe_browser_interface_broker_proxy.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"

namespace blink {

using VideoFrameBufferHandleType = media::mojom::blink::VideoBufferHandle::Tag;

// A collection of all types of handles that we use to reference a camera
// buffer backed by a GpuMemoryBuffer.
struct GpuMemoryBufferResources {
  explicit GpuMemoryBufferResources(gfx::GpuMemoryBufferHandle handle)
      : gpu_memory_buffer_handle(std::move(handle)) {}
  // Stores the GpuMemoryBufferHandle when a new buffer is first registered.
  // |gpu_memory_buffer_handle| is converted to |gpu_memory_buffer| below when
  // the camera frame is ready for the first time.
  gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle;
  // The GpuMemoryBuffer backing the camera frame.
  std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;
  // The SharedImage created from |gpu_memory_buffer|.
  gpu::Mailbox mailbox;
  // The release sync token for |mailbox|.
  gpu::SyncToken release_sync_token;
};

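// BufferContext keeps the shared-memory mapping, GpuMemoryBuffer, and
// SharedImage resources for one capture buffer alive for as long as any
// media::VideoFrame wrapping that buffer is still referenced. It is
// ref-counted and thread-safe because the last reference may be dropped on
// an arbitrary thread by a frame's destruction observer, while the buffer
// bookkeeping itself happens on the IO thread.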
struct VideoCaptureImpl::BufferContext
    : public base::RefCountedThreadSafe<BufferContext> {
 public:
  BufferContext(media::mojom::blink::VideoBufferHandlePtr buffer_handle,
                media::GpuVideoAcceleratorFactories* gpu_factories,
                scoped_refptr<base::SingleThreadTaskRunner> media_task_runner)
      : buffer_type_(buffer_handle->which()),
        gpu_factories_(gpu_factories),
        media_task_runner_(std::move(media_task_runner)) {
    switch (buffer_type_) {
      case VideoFrameBufferHandleType::SHARED_BUFFER_HANDLE:
        InitializeFromSharedMemory(
            std::move(buffer_handle->get_shared_buffer_handle()));
        break;
      case VideoFrameBufferHandleType::READ_ONLY_SHMEM_REGION:
        InitializeFromReadOnlyShmemRegion(
            std::move(buffer_handle->get_read_only_shmem_region()));
        break;
      case VideoFrameBufferHandleType::SHARED_MEMORY_VIA_RAW_FILE_DESCRIPTOR:
        NOTREACHED();
        break;
      case VideoFrameBufferHandleType::MAILBOX_HANDLES:
        InitializeFromMailbox(std::move(buffer_handle->get_mailbox_handles()));
        break;
      case VideoFrameBufferHandleType::GPU_MEMORY_BUFFER_HANDLE:
        CHECK(gpu_factories_);
        CHECK(media_task_runner_);
        InitializeFromGpuMemoryBufferHandle(
            std::move(buffer_handle->get_gpu_memory_buffer_handle()));
        break;
    }
  }

  VideoFrameBufferHandleType buffer_type() const { return buffer_type_; }
  const uint8_t* data() const { return data_; }
  size_t data_size() const { return data_size_; }
  const Vector<gpu::MailboxHolder>& mailbox_holders() const {
    return mailbox_holders_;
  }

  gfx::GpuMemoryBufferHandle TakeGpuMemoryBufferHandle() {
    return std::move(gmb_resources_->gpu_memory_buffer_handle);
  }
  void SetGpuMemoryBuffer(
      std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer) {
    gmb_resources_->gpu_memory_buffer = std::move(gpu_memory_buffer);
  }
  gfx::GpuMemoryBuffer* GetGpuMemoryBuffer() {
    return gmb_resources_->gpu_memory_buffer.get();
  }

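  // The static helpers below run on the media task runner (or repost
  // themselves to it): SharedImage creation, update, and destruction must
  // happen on the thread that |gpu_factories_| is bound to.
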
  // Creates SharedImage mailboxes for |gpu_memory_buffer_handle_| and wraps
  // the mailboxes with the buffer handles in a DMA-buf VideoFrame. The
  // consumer of the VideoFrame can access the data either through mailboxes
  // (e.g. display) or through the DMA-buf FDs (e.g. video encoder).
  static void BindBufferToTextureOnMediaThread(
      scoped_refptr<BufferContext> buffer_context,
      media::mojom::blink::VideoFrameInfoPtr info,
      std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer,
      scoped_refptr<media::VideoFrame> frame,
      base::OnceCallback<void(media::mojom::blink::VideoFrameInfoPtr,
                              scoped_refptr<media::VideoFrame>,
                              scoped_refptr<BufferContext>)>
          on_texture_bound) {
    DCHECK(buffer_context->media_task_runner_->BelongsToCurrentThread());
    DCHECK(buffer_context->gpu_factories_);
    DCHECK_EQ(info->pixel_format, media::PIXEL_FORMAT_NV12);
    DCHECK_EQ(
        buffer_context->gpu_factories_->VideoFrameOutputFormat(
            info->pixel_format),
        media::GpuVideoAcceleratorFactories::OutputFormat::NV12_SINGLE_GMB);

    // Create GPU texture and bind GpuMemoryBuffer to the texture.
    auto* sii = buffer_context->gpu_factories_->SharedImageInterface();
    if (!sii) {
      std::move(on_texture_bound)
          .Run(std::move(info), std::move(frame), std::move(buffer_context));
      return;
    }
    unsigned texture_target =
        buffer_context->gpu_factories_->ImageTextureTarget(
            gpu_memory_buffer->GetFormat());
    if (buffer_context->gmb_resources_->mailbox.IsZero()) {
      uint32_t usage =
          gpu::SHARED_IMAGE_USAGE_GLES2 | gpu::SHARED_IMAGE_USAGE_RASTER |
          gpu::SHARED_IMAGE_USAGE_DISPLAY | gpu::SHARED_IMAGE_USAGE_SCANOUT;
      buffer_context->gmb_resources_->mailbox = sii->CreateSharedImage(
          gpu_memory_buffer.get(),
          buffer_context->gpu_factories_->GpuMemoryBufferManager(),
          *(info->color_space), usage);
    } else {
      sii->UpdateSharedImage(
          buffer_context->gmb_resources_->release_sync_token,
          buffer_context->gmb_resources_->mailbox);
    }
    gpu::SyncToken sync_token = sii->GenUnverifiedSyncToken();
    CHECK(!buffer_context->gmb_resources_->mailbox.IsZero());

    gpu::MailboxHolder mailbox_holder_array[media::VideoFrame::kMaxPlanes];
    mailbox_holder_array[0] = gpu::MailboxHolder(
        buffer_context->gmb_resources_->mailbox, sync_token, texture_target);

    const auto gmb_size = gpu_memory_buffer->GetSize();
    frame = media::VideoFrame::WrapExternalGpuMemoryBuffer(
        gfx::Rect(info->visible_rect), gmb_size, std::move(gpu_memory_buffer),
        mailbox_holder_array,
        base::BindOnce(&BufferContext::MailboxHolderReleased, buffer_context),
        info->timestamp);
    frame->metadata()->SetBoolean(media::VideoFrameMetadata::ALLOW_OVERLAY,
                                  true);
    frame->metadata()->SetBoolean(
        media::VideoFrameMetadata::READ_LOCK_FENCES_ENABLED, true);
    std::move(on_texture_bound)
        .Run(std::move(info), std::move(frame), std::move(buffer_context));
  }

  static void MailboxHolderReleased(
      scoped_refptr<BufferContext> buffer_context,
      const gpu::SyncToken& release_sync_token) {
    if (!buffer_context->media_task_runner_->BelongsToCurrentThread()) {
      buffer_context->media_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(&BufferContext::MailboxHolderReleased,
                         std::move(buffer_context), release_sync_token));
      return;
    }
    buffer_context->gmb_resources_->release_sync_token = release_sync_token;
  }

  static void DestroyTextureOnMediaThread(
      media::GpuVideoAcceleratorFactories* gpu_factories,
      gpu::Mailbox mailbox,
      gpu::SyncToken release_sync_token) {
    if (!mailbox.IsZero()) {
      auto* sii = gpu_factories->SharedImageInterface();
      if (!sii)
        return;
      sii->DestroySharedImage(release_sync_token, mailbox);
    }
  }

 private:
  void InitializeFromSharedMemory(mojo::ScopedSharedBufferHandle handle) {
    DCHECK(handle.is_valid());
    base::UnsafeSharedMemoryRegion region =
        mojo::UnwrapUnsafeSharedMemoryRegion(std::move(handle));
    if (!region.IsValid()) {
      DLOG(ERROR) << "Unwrapping shared memory failed.";
      return;
    }
    writable_mapping_ = region.Map();
    if (!writable_mapping_.IsValid()) {
      DLOG(ERROR) << "Mapping shared memory failed.";
      return;
    }
    data_ = writable_mapping_.GetMemoryAsSpan<uint8_t>().data();
    data_size_ = writable_mapping_.size();
  }

  void InitializeFromReadOnlyShmemRegion(
      base::ReadOnlySharedMemoryRegion region) {
    DCHECK(region.IsValid());
    read_only_mapping_ = region.Map();
    DCHECK(read_only_mapping_.IsValid());
    data_ = read_only_mapping_.GetMemoryAsSpan<uint8_t>().data();
    data_size_ = read_only_mapping_.size();
  }

  void InitializeFromMailbox(
      media::mojom::blink::MailboxBufferHandleSetPtr mailbox_handles) {
    DCHECK_EQ(media::VideoFrame::kMaxPlanes,
              mailbox_handles->mailbox_holder.size());
    mailbox_holders_ = std::move(mailbox_handles->mailbox_holder);
  }

  void InitializeFromGpuMemoryBufferHandle(
      gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) {
    gmb_resources_ = std::make_unique<GpuMemoryBufferResources>(
        std::move(gpu_memory_buffer_handle));
  }

  friend class base::RefCountedThreadSafe<BufferContext>;
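  // The last reference may be dropped on any thread, so the destructor
  // releases GPU resources by posting to the media task runner rather than
  // touching them directly.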
  virtual ~BufferContext() {
    if (buffer_type_ == VideoFrameBufferHandleType::GPU_MEMORY_BUFFER_HANDLE) {
      media_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(&BufferContext::DestroyTextureOnMediaThread,
                         gpu_factories_, gmb_resources_->mailbox,
                         gmb_resources_->release_sync_token));
    }
  }

  VideoFrameBufferHandleType buffer_type_;

  // Only valid for |buffer_type_ == SHARED_BUFFER_HANDLE|.
  base::WritableSharedMemoryMapping writable_mapping_;

  // Only valid for |buffer_type_ == READ_ONLY_SHMEM_REGION|.
  base::ReadOnlySharedMemoryMapping read_only_mapping_;

  // These point into one of the above mappings, which stay mapped for the
  // lifetime of this object.
  const uint8_t* data_ = nullptr;
  size_t data_size_ = 0;

  // Only valid for |buffer_type_ == MAILBOX_HANDLES|.
  Vector<gpu::MailboxHolder> mailbox_holders_;

  // The following are for |buffer_type_ == GPU_MEMORY_BUFFER_HANDLE|.

  // Used to create the SharedImage from |gpu_memory_buffer_|.
  media::GpuVideoAcceleratorFactories* gpu_factories_;
  // The task runner that |gpu_factories_| runs on.
  const scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;

  std::unique_ptr<GpuMemoryBufferResources> gmb_resources_;

  DISALLOW_COPY_AND_ASSIGN(BufferContext);
};

// Information about a video capture client of ours.
struct VideoCaptureImpl::ClientInfo {
  ClientInfo() = default;
  ClientInfo(const ClientInfo& other) = default;
  ~ClientInfo() = default;

  media::VideoCaptureParams params;
  VideoCaptureStateUpdateCB state_update_cb;
  VideoCaptureDeliverFrameCB deliver_frame_cb;
};

VideoCaptureImpl::VideoCaptureImpl(media::VideoCaptureSessionId session_id)
    : device_id_(session_id),
      session_id_(session_id),
      video_capture_host_for_testing_(nullptr),
      state_(blink::VIDEO_CAPTURE_STATE_STOPPED),
      gpu_memory_buffer_support_(new gpu::GpuMemoryBufferSupport()) {
  CHECK(!session_id.is_empty());
  DETACH_FROM_THREAD(io_thread_checker_);

  Platform::Current()->GetBrowserInterfaceBroker()->GetInterface(
      pending_video_capture_host_.InitWithNewPipeAndPassReceiver());

  gpu_factories_ = Platform::Current()->GetGpuFactories();
  if (gpu_factories_) {
    media_task_runner_ = gpu_factories_->GetTaskRunner();
  }
}

VideoCaptureImpl::~VideoCaptureImpl() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  if ((state_ == VIDEO_CAPTURE_STATE_STARTING ||
       state_ == VIDEO_CAPTURE_STATE_STARTED) &&
      GetVideoCaptureHost())
    GetVideoCaptureHost()->Stop(device_id_);
}

void VideoCaptureImpl::SuspendCapture(bool suspend) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  if (suspend)
    GetVideoCaptureHost()->Pause(device_id_);
  else
    GetVideoCaptureHost()->Resume(device_id_, session_id_, params_);
}

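// The first client to start capture drives the actual device start; clients
// that attach while the device is starting or started join the existing
// session and receive the same frames. Clients that arrive while the device
// is stopping are parked in |clients_pending_on_restart_| and serviced by
// RestartCapture() once the STOPPED state change arrives.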
void VideoCaptureImpl::StartCapture(
    int client_id,
    const media::VideoCaptureParams& params,
    const VideoCaptureStateUpdateCB& state_update_cb,
    const VideoCaptureDeliverFrameCB& deliver_frame_cb) {
  DVLOG(1) << __func__ << " |device_id_| = " << device_id_;
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  OnLog("VideoCaptureImpl got request to start capture.");

  ClientInfo client_info;
  client_info.params = params;
  client_info.state_update_cb = state_update_cb;
  client_info.deliver_frame_cb = deliver_frame_cb;

  switch (state_) {
    case VIDEO_CAPTURE_STATE_STARTING:
    case VIDEO_CAPTURE_STATE_STARTED:
      clients_[client_id] = client_info;
      OnLog("VideoCaptureImpl capture is already started or starting.");
      // TODO(sheu): Allowing resolution change will require that all
      // outstanding clients of a capture session support resolution change.
      DCHECK_EQ(params_.resolution_change_policy,
                params.resolution_change_policy);
      return;
    case VIDEO_CAPTURE_STATE_STOPPING:
      clients_pending_on_restart_[client_id] = client_info;
      DVLOG(1) << __func__ << " Got new resolution while stopping: "
               << params.requested_format.frame_size.ToString();
      return;
    case VIDEO_CAPTURE_STATE_STOPPED:
    case VIDEO_CAPTURE_STATE_ENDED:
      clients_[client_id] = client_info;
      params_ = params;
      params_.requested_format.frame_rate =
          std::min(params_.requested_format.frame_rate,
                   static_cast<float>(media::limits::kMaxFramesPerSecond));

      DVLOG(1) << "StartCapture: starting with first resolution "
               << params_.requested_format.frame_size.ToString();
      OnLog("VideoCaptureImpl starting capture.");
      StartCaptureInternal();
      return;
    case VIDEO_CAPTURE_STATE_ERROR:
      OnLog("VideoCaptureImpl is in error state.");
      state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_ERROR);
      return;
    case VIDEO_CAPTURE_STATE_PAUSED:
    case VIDEO_CAPTURE_STATE_RESUMED:
      // The internal |state_| is never set to PAUSED/RESUMED since
      // VideoCaptureImpl is not modified by those.
      NOTREACHED();
      return;
  }
}

void VideoCaptureImpl::StopCapture(int client_id) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  // A client ID can be in only one client list, so once it has been removed
  // from one list the other RemoveClient() call can be skipped.
  if (!RemoveClient(client_id, &clients_pending_on_restart_)) {
    RemoveClient(client_id, &clients_);
  }

  if (!clients_.empty())
    return;
  DVLOG(1) << "StopCapture: No more client, stopping ...";
  StopDevice();
  client_buffers_.clear();
  weak_factory_.InvalidateWeakPtrs();
}

void VideoCaptureImpl::RequestRefreshFrame() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  GetVideoCaptureHost()->RequestRefreshFrame(device_id_);
}

void VideoCaptureImpl::GetDeviceSupportedFormats(
    VideoCaptureDeviceFormatsCallback callback) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  GetVideoCaptureHost()->GetDeviceSupportedFormats(
      device_id_, session_id_,
      base::BindOnce(&VideoCaptureImpl::OnDeviceSupportedFormats,
                     weak_factory_.GetWeakPtr(), std::move(callback)));
}

void VideoCaptureImpl::GetDeviceFormatsInUse(
    VideoCaptureDeviceFormatsCallback callback) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  GetVideoCaptureHost()->GetDeviceFormatsInUse(
      device_id_, session_id_,
      base::BindOnce(&VideoCaptureImpl::OnDeviceFormatsInUse,
                     weak_factory_.GetWeakPtr(), std::move(callback)));
}

void VideoCaptureImpl::OnFrameDropped(
    media::VideoCaptureFrameDropReason reason) {
  GetVideoCaptureHost()->OnFrameDropped(device_id_, reason);
}

void VideoCaptureImpl::OnLog(const String& message) {
  GetVideoCaptureHost()->OnLog(device_id_, message);
}

void VideoCaptureImpl::SetGpuMemoryBufferSupportForTesting(
    std::unique_ptr<gpu::GpuMemoryBufferSupport> gpu_memory_buffer_support) {
  gpu_memory_buffer_support_ = std::move(gpu_memory_buffer_support);
}

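// Maps device-side media::mojom::VideoCaptureState changes onto
// blink::VideoCaptureState updates for every registered client, and drives
// restart (STOPPED with pending clients) or teardown (FAILED/ENDED) of the
// local session state.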
void VideoCaptureImpl::OnStateChanged(media::mojom::VideoCaptureState state) {
  DVLOG(1) << __func__ << " state: " << state;
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);

  switch (state) {
    case media::mojom::VideoCaptureState::STARTED:
      OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_STARTED");
      state_ = VIDEO_CAPTURE_STATE_STARTED;
      for (const auto& client : clients_)
        client.second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_STARTED);
      // In case any frames were dropped before STARTED, always request a
      // frame refresh to start the video call with. The capture device
      // decides whether it should actually refresh a frame.
      RequestRefreshFrame();
      break;
    case media::mojom::VideoCaptureState::STOPPED:
      OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_STOPPED");
      state_ = VIDEO_CAPTURE_STATE_STOPPED;
      client_buffers_.clear();
      weak_factory_.InvalidateWeakPtrs();
      if (!clients_.empty() || !clients_pending_on_restart_.empty()) {
        OnLog("VideoCaptureImpl restarting capture");
        RestartCapture();
      }
      break;
    case media::mojom::VideoCaptureState::PAUSED:
      for (const auto& client : clients_)
        client.second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_PAUSED);
      break;
    case media::mojom::VideoCaptureState::RESUMED:
      for (const auto& client : clients_)
        client.second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_RESUMED);
      break;
    case media::mojom::VideoCaptureState::FAILED:
      OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_ERROR");
      for (const auto& client : clients_)
        client.second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_ERROR);
      clients_.clear();
      state_ = VIDEO_CAPTURE_STATE_ERROR;
      break;
    case media::mojom::VideoCaptureState::ENDED:
      OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_ENDED");
      // We'll only notify the client that the stream has stopped.
      for (const auto& client : clients_)
        client.second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_STOPPED);
      clients_.clear();
      state_ = VIDEO_CAPTURE_STATE_ENDED;
      break;
  }
}

void VideoCaptureImpl::OnNewBuffer(
    int32_t buffer_id,
    media::mojom::blink::VideoBufferHandlePtr buffer_handle) {
  DVLOG(1) << __func__ << " buffer_id: " << buffer_id;
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  const bool inserted =
      client_buffers_
          .emplace(buffer_id, new BufferContext(std::move(buffer_handle),
                                                gpu_factories_,
                                                media_task_runner_))
          .second;
  DCHECK(inserted);
}

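// Wraps the buffer referenced by |buffer_id| in a media::VideoFrame without
// copying pixel data. Shared-memory buffers are wrapped directly, mailbox
// handles become native-texture frames, and GpuMemoryBuffer handles are
// bound to a SharedImage on the media task runner before delivery resumes
// on the IO thread via OnVideoFrameReady().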
void VideoCaptureImpl::OnBufferReady(
    int32_t buffer_id,
    media::mojom::blink::VideoFrameInfoPtr info) {
  DVLOG(1) << __func__ << " buffer_id: " << buffer_id;
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);

  bool consume_buffer = state_ == VIDEO_CAPTURE_STATE_STARTED;
  if (!consume_buffer) {
    OnFrameDropped(media::VideoCaptureFrameDropReason::
                       kVideoCaptureImplNotInStartedState);
    GetVideoCaptureHost()->ReleaseBuffer(device_id_, buffer_id, -1.0);
    return;
  }

  base::TimeTicks reference_time;
  media::VideoFrameMetadata frame_metadata;
  frame_metadata.MergeInternalValuesFrom(info->metadata);
  const bool success = frame_metadata.GetTimeTicks(
      media::VideoFrameMetadata::REFERENCE_TIME, &reference_time);
  DCHECK(success);

  if (first_frame_ref_time_.is_null()) {
    first_frame_ref_time_ = reference_time;
    OnLog("First frame received at VideoCaptureImpl");
  }

  // If the timestamp is not set, use the reference time as a rough estimate;
  // e.g. ThreadSafeCaptureOracle::DidCaptureFrame().
  // TODO(miu): Fix upstream capturers to always set timestamp and reference
  // time. See http://crbug/618407/ for tracking.
  if (info->timestamp.is_zero())
    info->timestamp = reference_time - first_frame_ref_time_;

  // TODO(qiangchen): Change the metric name to "reference_time" and
  // "timestamp", so that we have consistent naming everywhere.
  // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
  TRACE_EVENT_INSTANT2("cast_perf_test", "OnBufferReceived",
                       TRACE_EVENT_SCOPE_THREAD, "timestamp",
                       (reference_time - base::TimeTicks()).InMicroseconds(),
                       "time_delta", info->timestamp.InMicroseconds());

  const auto& iter = client_buffers_.find(buffer_id);
  DCHECK(iter != client_buffers_.end());
  scoped_refptr<BufferContext> buffer_context = iter->second;
  scoped_refptr<media::VideoFrame> frame;
  switch (buffer_context->buffer_type()) {
    case VideoFrameBufferHandleType::SHARED_BUFFER_HANDLE:
      // The frame is backed by a writable (unsafe) shared memory handle, but
      // as it is not sent cross-process the region does not need to be
      // attached to the frame. See also the case for READ_ONLY_SHMEM_REGION.
      if (info->strides) {
        CHECK(IsYuvPlanar(info->pixel_format) &&
              (media::VideoFrame::NumPlanes(info->pixel_format) == 3))
            << "Currently, only YUV formats support custom strides.";
        uint8_t* y_data = const_cast<uint8_t*>(buffer_context->data());
        uint8_t* u_data =
            y_data +
            (media::VideoFrame::Rows(media::VideoFrame::kYPlane,
                                     info->pixel_format,
                                     info->coded_size.height()) *
             info->strides->stride_by_plane[0]);
        uint8_t* v_data =
            u_data +
            (media::VideoFrame::Rows(media::VideoFrame::kUPlane,
                                     info->pixel_format,
                                     info->coded_size.height()) *
             info->strides->stride_by_plane[1]);
        frame = media::VideoFrame::WrapExternalYuvData(
            info->pixel_format, gfx::Size(info->coded_size),
            gfx::Rect(info->visible_rect), info->visible_rect.size(),
            info->strides->stride_by_plane[0],
            info->strides->stride_by_plane[1],
            info->strides->stride_by_plane[2], y_data, u_data, v_data,
            info->timestamp);
      } else {
        frame = media::VideoFrame::WrapExternalData(
            info->pixel_format, gfx::Size(info->coded_size),
            gfx::Rect(info->visible_rect), info->visible_rect.size(),
            const_cast<uint8_t*>(buffer_context->data()),
            buffer_context->data_size(), info->timestamp);
      }
      break;
    case VideoFrameBufferHandleType::READ_ONLY_SHMEM_REGION:
      // As with the SHARED_BUFFER_HANDLE type, it is sufficient to just wrap
      // the data without attaching the shared region to the frame.
      frame = media::VideoFrame::WrapExternalData(
          info->pixel_format, gfx::Size(info->coded_size),
          gfx::Rect(info->visible_rect), info->visible_rect.size(),
          const_cast<uint8_t*>(buffer_context->data()),
          buffer_context->data_size(), info->timestamp);
      break;
    case VideoFrameBufferHandleType::SHARED_MEMORY_VIA_RAW_FILE_DESCRIPTOR:
      NOTREACHED();
      break;
    case VideoFrameBufferHandleType::MAILBOX_HANDLES: {
      gpu::MailboxHolder mailbox_holder_array[media::VideoFrame::kMaxPlanes];
      CHECK_EQ(media::VideoFrame::kMaxPlanes,
               buffer_context->mailbox_holders().size());
      for (int i = 0; i < media::VideoFrame::kMaxPlanes; i++) {
        mailbox_holder_array[i] = buffer_context->mailbox_holders()[i];
      }
      frame = media::VideoFrame::WrapNativeTextures(
          info->pixel_format, mailbox_holder_array,
          media::VideoFrame::ReleaseMailboxCB(), gfx::Size(info->coded_size),
          gfx::Rect(info->visible_rect), info->visible_rect.size(),
          info->timestamp);
      break;
    }
    case VideoFrameBufferHandleType::GPU_MEMORY_BUFFER_HANDLE: {
      // Create a GpuMemoryBuffer from the handle the first time this buffer
      // is seen.
      if (!buffer_context->GetGpuMemoryBuffer()) {
        gfx::BufferFormat gfx_format;
        switch (info->pixel_format) {
          case media::VideoPixelFormat::PIXEL_FORMAT_NV12:
            gfx_format = gfx::BufferFormat::YUV_420_BIPLANAR;
            break;
          default:
            LOG(FATAL) << "Unsupported pixel format";
            return;
        }
        // The GpuMemoryBuffer is allocated and owned by the video capture
        // buffer pool from the video capture service process, so we don't
        // need to destroy the GpuMemoryBuffer here.
        auto gmb =
            gpu_memory_buffer_support_->CreateGpuMemoryBufferImplFromHandle(
                buffer_context->TakeGpuMemoryBufferHandle(),
                gfx::Size(info->coded_size), gfx_format,
                gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
                base::DoNothing());
        buffer_context->SetGpuMemoryBuffer(std::move(gmb));
      }
      CHECK(buffer_context->GetGpuMemoryBuffer());

      // Clone the GpuMemoryBuffer and wrap it in a VideoFrame.
      std::unique_ptr<gfx::GpuMemoryBuffer> gmb =
          gpu_memory_buffer_support_->CreateGpuMemoryBufferImplFromHandle(
              buffer_context->GetGpuMemoryBuffer()->CloneHandle(),
              buffer_context->GetGpuMemoryBuffer()->GetSize(),
              buffer_context->GetGpuMemoryBuffer()->GetFormat(),
              gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE, base::DoNothing());
      media_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(
              &BufferContext::BindBufferToTextureOnMediaThread,
              std::move(buffer_context), std::move(info), std::move(gmb),
              frame,
              media::BindToCurrentLoop(base::BindOnce(
                  &VideoCaptureImpl::OnVideoFrameReady,
                  weak_factory_.GetWeakPtr(), buffer_id, reference_time))));
      return;
    }
  }
  OnVideoFrameReady(buffer_id, reference_time, std::move(info),
                    std::move(frame), std::move(buffer_context));
}

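// Delivers |frame| to all clients. A destruction observer attached to the
// frame reports, via DidFinishConsumingFrame() and a trampoline back to the
// IO thread, when the last consumer is done, so the buffer is returned to
// the host together with the consumers' resource-utilization feedback.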
void VideoCaptureImpl::OnVideoFrameReady(
    int32_t buffer_id,
    base::TimeTicks reference_time,
    media::mojom::blink::VideoFrameInfoPtr info,
    scoped_refptr<media::VideoFrame> frame,
    scoped_refptr<BufferContext> buffer_context) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);

  if (!frame) {
    OnFrameDropped(media::VideoCaptureFrameDropReason::
                       kVideoCaptureImplFailedToWrapDataAsMediaVideoFrame);
    GetVideoCaptureHost()->ReleaseBuffer(device_id_, buffer_id, -1.0);
    return;
  }

  frame->AddDestructionObserver(base::BindOnce(
      &VideoCaptureImpl::DidFinishConsumingFrame, frame->metadata(),
      media::BindToCurrentLoop(base::BindOnce(
          &VideoCaptureImpl::OnAllClientsFinishedConsumingFrame,
          weak_factory_.GetWeakPtr(), buffer_id,
          std::move(buffer_context)))));

  if (info->color_space.has_value() && info->color_space->IsValid())
    frame->set_color_space(info->color_space.value());

  frame->metadata()->MergeInternalValuesFrom(info->metadata);

  // TODO(qiangchen): Dive into the full code path to let frame metadata hold
  // reference time rather than using an extra parameter.
  for (const auto& client : clients_)
    client.second.deliver_frame_cb.Run(frame, reference_time);
}

void VideoCaptureImpl::OnBufferDestroyed(int32_t buffer_id) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  const auto& cb_iter = client_buffers_.find(buffer_id);
  if (cb_iter != client_buffers_.end()) {
    DCHECK(!cb_iter->second.get() || cb_iter->second->HasOneRef())
        << "Instructed to delete buffer we are still using.";
    client_buffers_.erase(cb_iter);
  }
}

void VideoCaptureImpl::OnAllClientsFinishedConsumingFrame(
    int buffer_id,
    scoped_refptr<BufferContext> buffer_context,
    double consumer_resource_utilization) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);

  // Subtle race note: It's important that the |buffer_context| argument be
  // std::move()'ed to this method and never copied. This is so that the
  // caller, DidFinishConsumingFrame(), does not implicitly retain a
  // reference while it is running the trampoline callback on another thread.
  // This is necessary to ensure the reference count on the BufferContext
  // will be correct at the time OnBufferDestroyed() is called.
  // http://crbug.com/797851
#if DCHECK_IS_ON()
  // The BufferContext should have exactly two references to it at this
  // point, one is this method's second argument and the other is from
  // |client_buffers_|.
  DCHECK(!buffer_context->HasOneRef());
  BufferContext* const buffer_raw_ptr = buffer_context.get();
  buffer_context = nullptr;
  // Now there should be only one reference, from |client_buffers_|.
  DCHECK(buffer_raw_ptr->HasOneRef());
#else
  buffer_context = nullptr;
#endif

  GetVideoCaptureHost()->ReleaseBuffer(device_id_, buffer_id,
                                       consumer_resource_utilization);
}

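// Issues Stop to the host and enters STOPPING; the device is not considered
// stopped until OnStateChanged(STOPPED) arrives, at which point the buffers
// are dropped and any pending clients trigger RestartCapture().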
void VideoCaptureImpl::StopDevice() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  if (state_ != VIDEO_CAPTURE_STATE_STARTING &&
      state_ != VIDEO_CAPTURE_STATE_STARTED)
    return;
  state_ = VIDEO_CAPTURE_STATE_STOPPING;
  OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_STOPPING");
  GetVideoCaptureHost()->Stop(device_id_);
  params_.requested_format.frame_size.SetSize(0, 0);
}

void VideoCaptureImpl::RestartCapture() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  DCHECK_EQ(state_, VIDEO_CAPTURE_STATE_STOPPED);

  int width = 0;
  int height = 0;
  clients_.insert(clients_pending_on_restart_.begin(),
                  clients_pending_on_restart_.end());
  clients_pending_on_restart_.clear();
  for (const auto& client : clients_) {
    width = std::max(width,
                     client.second.params.requested_format.frame_size.width());
    height = std::max(
        height, client.second.params.requested_format.frame_size.height());
  }
  params_.requested_format.frame_size.SetSize(width, height);
  DVLOG(1) << __func__ << " "
           << params_.requested_format.frame_size.ToString();
  StartCaptureInternal();
}

void VideoCaptureImpl::StartCaptureInternal() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  state_ = VIDEO_CAPTURE_STATE_STARTING;
  OnLog("VideoCaptureImpl changing state to VIDEO_CAPTURE_STATE_STARTING");

  GetVideoCaptureHost()->Start(device_id_, session_id_, params_,
                               observer_receiver_.BindNewPipeAndPassRemote());
}

void VideoCaptureImpl::OnDeviceSupportedFormats(
    VideoCaptureDeviceFormatsCallback callback,
    const Vector<media::VideoCaptureFormat>& supported_formats) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  std::move(callback).Run(supported_formats);
}

void VideoCaptureImpl::OnDeviceFormatsInUse(
    VideoCaptureDeviceFormatsCallback callback,
    const Vector<media::VideoCaptureFormat>& formats_in_use) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  std::move(callback).Run(formats_in_use);
}

bool VideoCaptureImpl::RemoveClient(int client_id, ClientInfoMap* clients) {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);

  const ClientInfoMap::iterator it = clients->find(client_id);
  if (it == clients->end())
    return false;

  it->second.state_update_cb.Run(blink::VIDEO_CAPTURE_STATE_STOPPED);
  clients->erase(it);
  return true;
}

media::mojom::blink::VideoCaptureHost*
VideoCaptureImpl::GetVideoCaptureHost() {
  DCHECK_CALLED_ON_VALID_THREAD(io_thread_checker_);
  if (video_capture_host_for_testing_)
    return video_capture_host_for_testing_;

  if (!video_capture_host_.is_bound())
    video_capture_host_.Bind(std::move(pending_video_capture_host_));
  return video_capture_host_.get();
}

// static
void VideoCaptureImpl::DidFinishConsumingFrame(
    const media::VideoFrameMetadata* metadata,
    BufferFinishedCallback callback_to_io_thread) {
  // Note: This function may be called on any thread by the VideoFrame
  // destructor. |metadata| is still valid for read-access at this point.
  double consumer_resource_utilization = -1.0;
  if (!metadata->GetDouble(media::VideoFrameMetadata::RESOURCE_UTILIZATION,
                           &consumer_resource_utilization)) {
    consumer_resource_utilization = -1.0;
  }
  std::move(callback_to_io_thread).Run(consumer_resource_utilization);
}

}  // namespace blink