Diffstat (limited to 'Source/WebCore/Modules/mediasource/SourceBuffer.cpp')
-rw-r--r-- | Source/WebCore/Modules/mediasource/SourceBuffer.cpp | 1651
1 file changed, 1272 insertions, 379 deletions
diff --git a/Source/WebCore/Modules/mediasource/SourceBuffer.cpp b/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
index e642179f3..2b4ac5854 100644
--- a/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
+++ b/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -34,8 +35,10 @@
 #if ENABLE(MEDIA_SOURCE)
 
 #include "AudioTrackList.h"
+#include "BufferSource.h"
 #include "Event.h"
-#include "ExceptionCodePlaceholder.h"
+#include "EventNames.h"
+#include "ExceptionCode.h"
 #include "GenericEventQueue.h"
 #include "HTMLMediaElement.h"
 #include "InbandTextTrack.h"
@@ -44,58 +47,71 @@
 #include "MediaSample.h"
 #include "MediaSource.h"
 #include "SampleMap.h"
+#include "SourceBufferList.h"
 #include "SourceBufferPrivate.h"
 #include "TextTrackList.h"
 #include "TimeRanges.h"
 #include "VideoTrackList.h"
+#include <limits>
 #include <map>
+#include <runtime/JSCInlines.h>
+#include <runtime/JSLock.h>
+#include <runtime/VM.h>
+#include <wtf/CurrentTime.h>
 #include <wtf/NeverDestroyed.h>
 
 namespace WebCore {
 
+static const double ExponentialMovingAverageCoefficient = 0.1;
+
 struct SourceBuffer::TrackBuffer {
     MediaTime lastDecodeTimestamp;
     MediaTime lastFrameDuration;
     MediaTime highestPresentationTimestamp;
     MediaTime lastEnqueuedPresentationTime;
-    bool needRandomAccessFlag;
-    bool enabled;
+    MediaTime lastEnqueuedDecodeEndTime;
+    bool needRandomAccessFlag { true };
+    bool enabled { false };
+    bool needsReenqueueing { false };
     SampleMap samples;
-    SampleMap::MapType decodeQueue;
+    DecodeOrderSampleMap::MapType decodeQueue;
     RefPtr<MediaDescription> description;
+    PlatformTimeRanges buffered;
 
     TrackBuffer()
        : lastDecodeTimestamp(MediaTime::invalidTime())
        , lastFrameDuration(MediaTime::invalidTime())
        , highestPresentationTimestamp(MediaTime::invalidTime())
        , lastEnqueuedPresentationTime(MediaTime::invalidTime())
-       , needRandomAccessFlag(true)
-       , enabled(false)
+       , lastEnqueuedDecodeEndTime(MediaTime::invalidTime())
    {
    }
 };
 
-PassRef<SourceBuffer> SourceBuffer::create(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
+Ref<SourceBuffer> SourceBuffer::create(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
 {
-    RefPtr<SourceBuffer> sourceBuffer(adoptRef(new SourceBuffer(std::move(sourceBufferPrivate), source)));
+    auto sourceBuffer = adoptRef(*new SourceBuffer(WTFMove(sourceBufferPrivate), source));
     sourceBuffer->suspendIfNeeded();
-    return sourceBuffer.releaseNonNull();
+    return sourceBuffer;
 }
 
-SourceBuffer::SourceBuffer(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
+SourceBuffer::SourceBuffer(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
     : ActiveDOMObject(source->scriptExecutionContext())
-    , m_private(std::move(sourceBufferPrivate))
+    , m_private(WTFMove(sourceBufferPrivate))
     , m_source(source)
     , m_asyncEventQueue(*this)
-    , m_updating(false)
-    , m_appendBufferTimer(this, &SourceBuffer::appendBufferTimerFired)
-    , m_highestPresentationEndTimestamp(MediaTime::invalidTime())
-    , m_receivedFirstInitializationSegment(false)
+    , m_appendBufferTimer(*this, &SourceBuffer::appendBufferTimerFired)
+    , m_appendWindowStart(MediaTime::zeroTime())
+    , m_appendWindowEnd(MediaTime::positiveInfiniteTime())
+    , m_groupStartTimestamp(MediaTime::invalidTime())
+    , m_groupEndTimestamp(MediaTime::zeroTime())
     , m_buffered(TimeRanges::create())
-    , m_active(false)
     , m_appendState(WaitingForSegment)
+    , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
+    , m_pendingRemoveStart(MediaTime::invalidTime())
+    , m_pendingRemoveEnd(MediaTime::invalidTime())
+    , m_removeTimer(*this, &SourceBuffer::removeTimerFired)
 {
-    ASSERT(m_private);
     ASSERT(m_source);
 
     m_private->setClient(this);
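The new `ExponentialMovingAverageCoefficient` constant above is presumably consumed by the buffering monitor this patch introduces (note the new `m_timeOfBufferingMonitor` member); the hunks that use it are not shown here. As a standalone sketch — not WebKit code — of how a 0.1 coefficient weights measurements:

    #include <cstdio>

    // Standalone sketch (not WebKit code): with coefficient 0.1, each new
    // measurement contributes 10% and the running history contributes 90%.
    static double updateMovingAverage(double average, double measurement)
    {
        const double coefficient = 0.1;
        return coefficient * measurement + (1 - coefficient) * average;
    }

    int main()
    {
        double average = 10.0; // hypothetical prior estimate, e.g. 10 MB/s
        for (double measurement : { 12.0, 9.0, 11.0 })
            printf("average = %.3f\n", average = updateMovingAverage(average, measurement));
        return 0;
    }

A small coefficient like 0.1 makes the estimate robust against one-off spikes in append rate, at the cost of reacting slowly to genuine changes.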
@@ -105,50 +121,37 @@
 SourceBuffer::~SourceBuffer()
 {
     ASSERT(isRemoved());
 
-    m_private->setClient(0);
+    m_private->setClient(nullptr);
 }
 
-PassRefPtr<TimeRanges> SourceBuffer::buffered(ExceptionCode& ec) const
+ExceptionOr<Ref<TimeRanges>> SourceBuffer::buffered() const
 {
     // Section 3.1 buffered attribute steps.
     // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
     // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
     //    INVALID_STATE_ERR exception and abort these steps.
-    if (isRemoved()) {
-        ec = INVALID_STATE_ERR;
-        return nullptr;
-    }
+    if (isRemoved())
+        return Exception { INVALID_STATE_ERR };
 
     // 2. Return a new static normalized TimeRanges object for the media segments buffered.
     return m_buffered->copy();
 }
 
-const RefPtr<TimeRanges>& SourceBuffer::buffered() const
-{
-    return m_buffered;
-}
-
 double SourceBuffer::timestampOffset() const
 {
     return m_timestampOffset.toDouble();
 }
 
-void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::setTimestampOffset(double offset)
 {
     // Section 3.1 timestampOffset attribute setter steps.
     // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
-    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
+    // 1. Let new timestamp offset equal the new value being assigned to this attribute.
+    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
     //    INVALID_STATE_ERR exception and abort these steps.
-    if (isRemoved()) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
-
     // 3. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
-    if (m_updating) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
+    if (isRemoved() || m_updating)
+        return Exception { INVALID_STATE_ERR };
 
     // 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
     // 4.1 Set the readyState attribute of the parent media source to "open"
@@ -156,155 +159,277 @@ void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
     m_source->openIfInEndedState();
 
     // 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
-    if (m_appendState == ParsingMediaSegment) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
+    if (m_appendState == ParsingMediaSegment)
+        return Exception { INVALID_STATE_ERR };
+
+    MediaTime newTimestampOffset = MediaTime::createWithDouble(offset);
+
+    // 6. If the mode attribute equals "sequence", then set the group start timestamp to new timestamp offset.
+    if (m_mode == AppendMode::Sequence)
+        m_groupStartTimestamp = newTimestampOffset;
 
-    // 6. Update the attribute to the new value.
-    m_timestampOffset = MediaTime::createWithDouble(offset);
+    // 7. Update the attribute to the new value.
+    m_timestampOffset = newTimestampOffset;
+
+    return { };
 }
 
-void SourceBuffer::appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode& ec)
+double SourceBuffer::appendWindowStart() const
 {
-    // Section 3.2 appendBuffer()
-    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
-    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
-    if (!data) {
-        ec = INVALID_ACCESS_ERR;
-        return;
-    }
+    return m_appendWindowStart.toDouble();
+}
 
-    appendBufferInternal(static_cast<unsigned char*>(data->data()), data->byteLength(), ec);
+ExceptionOr<void> SourceBuffer::setAppendWindowStart(double newValue)
+{
+    // Section 3.1 appendWindowStart attribute setter steps.
+    // W3C Editor's Draft 16 September 2016
+    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowstart
+    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
+    //    then throw an InvalidStateError exception and abort these steps.
+    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+    if (isRemoved() || m_updating)
+        return Exception { INVALID_STATE_ERR };
+
+    // 3. If the new value is less than 0 or greater than or equal to appendWindowEnd then
+    //    throw a TypeError exception and abort these steps.
+    if (newValue < 0 || newValue >= m_appendWindowEnd.toDouble())
+        return Exception { TypeError };
+
+    // 4. Update the attribute to the new value.
+    m_appendWindowStart = MediaTime::createWithDouble(newValue);
+
+    return { };
 }
 
-void SourceBuffer::appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode& ec)
+double SourceBuffer::appendWindowEnd() const
 {
-    // Section 3.2 appendBuffer()
-    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
-    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
-    if (!data) {
-        ec = INVALID_ACCESS_ERR;
-        return;
+    return m_appendWindowEnd.toDouble();
+}
+
+ExceptionOr<void> SourceBuffer::setAppendWindowEnd(double newValue)
+{
+    // Section 3.1 appendWindowEnd attribute setter steps.
+    // W3C Editor's Draft 16 September 2016
+    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowend
+    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
+    //    then throw an InvalidStateError exception and abort these steps.
+    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+    if (isRemoved() || m_updating)
+        return Exception { INVALID_STATE_ERR };
+
+    // 3. If the new value equals NaN, then throw a TypeError and abort these steps.
+    // 4. If the new value is less than or equal to appendWindowStart then throw a TypeError exception
+    //    and abort these steps.
+    if (std::isnan(newValue) || newValue <= m_appendWindowStart.toDouble())
+        return Exception { TypeError };
+
+    // 5. Update the attribute to the new value.
+    m_appendWindowEnd = MediaTime::createWithDouble(newValue);
+
+    return { };
+}
+
+ExceptionOr<void> SourceBuffer::appendBuffer(const BufferSource& data)
+{
+    return appendBufferInternal(static_cast<const unsigned char*>(data.data()), data.length());
+}
+
+void SourceBuffer::resetParserState()
+{
+    // Section 3.5.2 Reset Parser State algorithm steps.
+    // http://www.w3.org/TR/2014/CR-media-source-20140717/#sourcebuffer-reset-parser-state
+    // 1. If the append state equals PARSING_MEDIA_SEGMENT and the input buffer contains some complete coded frames,
+    //    then run the coded frame processing algorithm until all of these complete coded frames have been processed.
+    // FIXME: If any implementation will work in pulling mode (instead of async push to SourceBufferPrivate, and forget)
+    // this should be handled somehow either here, or in m_private->abort();
+
+    // 2. Unset the last decode timestamp on all track buffers.
+    // 3. Unset the last frame duration on all track buffers.
+    // 4. Unset the highest presentation timestamp on all track buffers.
+    // 5. Set the need random access point flag on all track buffers to true.
+    for (auto& trackBufferPair : m_trackBufferMap.values()) {
+        trackBufferPair.lastDecodeTimestamp = MediaTime::invalidTime();
+        trackBufferPair.lastFrameDuration = MediaTime::invalidTime();
+        trackBufferPair.highestPresentationTimestamp = MediaTime::invalidTime();
+        trackBufferPair.needRandomAccessFlag = true;
     }
+    // 6. Remove all bytes from the input buffer.
+    // Note: this is handled by abortIfUpdating()
+    // 7. Set append state to WAITING_FOR_SEGMENT.
+    m_appendState = WaitingForSegment;
 
-    appendBufferInternal(static_cast<unsigned char*>(data->baseAddress()), data->byteLength(), ec);
+    m_private->resetParserState();
 }
 
-void SourceBuffer::abort(ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::abort()
 {
     // Section 3.2 abort() method steps.
-    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
+    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
     // 1. If this object has been removed from the sourceBuffers attribute of the parent media source
     //    then throw an INVALID_STATE_ERR exception and abort these steps.
     // 2. If the readyState attribute of the parent media source is not in the "open" state
     //    then throw an INVALID_STATE_ERR exception and abort these steps.
-    if (isRemoved() || !m_source->isOpen()) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
+    if (isRemoved() || !m_source->isOpen())
+        return Exception { INVALID_STATE_ERR };
 
-    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
+    // 3. If the range removal algorithm is running, then throw an InvalidStateError exception and abort these steps.
+    if (m_removeTimer.isActive())
+        return Exception { INVALID_STATE_ERR };
+
+    // 4. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
     abortIfUpdating();
 
-    // 4. Run the reset parser state algorithm.
-    m_private->abort();
+    // 5. Run the reset parser state algorithm.
+    resetParserState();
+
+    // 6. Set appendWindowStart to the presentation start time.
+    m_appendWindowStart = MediaTime::zeroTime();
+
+    // 7. Set appendWindowEnd to positive Infinity.
+    m_appendWindowEnd = MediaTime::positiveInfiniteTime();
+
+    return { };
+}
+
+ExceptionOr<void> SourceBuffer::remove(double start, double end)
+{
+    return remove(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end));
+}
+
+ExceptionOr<void> SourceBuffer::remove(const MediaTime& start, const MediaTime& end)
+{
+    LOG(MediaSource, "SourceBuffer::remove(%p) - start(%lf), end(%lf)", this, start.toDouble(), end.toDouble());
+
+    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-remove
+    // Section 3.2 remove() method steps.
+    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw
+    //    an InvalidStateError exception and abort these steps.
+    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+    if (isRemoved() || m_updating)
+        return Exception { INVALID_STATE_ERR };
+
+    // 3. If duration equals NaN, then throw a TypeError exception and abort these steps.
+    // 4. If start is negative or greater than duration, then throw a TypeError exception and abort these steps.
+    // 5. If end is less than or equal to start or end equals NaN, then throw a TypeError exception and abort these steps.
+    if (m_source->duration().isInvalid()
+        || end.isInvalid()
+        || start.isInvalid()
+        || start < MediaTime::zeroTime()
+        || start > m_source->duration()
+        || end <= start) {
+        return Exception { TypeError };
+    }
 
-    // FIXME(229408) Add steps 5-6 update appendWindowStart & appendWindowEnd.
+    // 6. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
+    // 6.1. Set the readyState attribute of the parent media source to "open"
+    // 6.2. Queue a task to fire a simple event named sourceopen at the parent media source .
+    m_source->openIfInEndedState();
+
+    // 7. Run the range removal algorithm with start and end as the start and end of the removal range.
+    rangeRemoval(start, end);
+
+    return { };
 }
 
+void SourceBuffer::rangeRemoval(const MediaTime& start, const MediaTime& end)
+{
+    // 3.5.7 Range Removal
+    // https://rawgit.com/w3c/media-source/7bbe4aa33c61ec025bc7acbd80354110f6a000f9/media-source.html#sourcebuffer-range-removal
+    // 1. Let start equal the starting presentation timestamp for the removal range.
+    // 2. Let end equal the end presentation timestamp for the removal range.
+    // 3. Set the updating attribute to true.
+    m_updating = true;
+
+    // 4. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
+    scheduleEvent(eventNames().updatestartEvent);
+
+    // 5. Return control to the caller and run the rest of the steps asynchronously.
+    m_pendingRemoveStart = start;
+    m_pendingRemoveEnd = end;
+    m_removeTimer.startOneShot(0);
+}
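The new `remove()`/`rangeRemoval()` pair above splits the operation in two: validation and the `updatestart` event happen synchronously, while the actual frame removal and the `update`/`updateend` events run later from `removeTimerFired()` via `m_removeTimer.startOneShot(0)`. A minimal standalone sketch of that pattern, with a plain task queue standing in for WebKit's timer (hypothetical types, not WebKit code):

    #include <cstdio>
    #include <functional>
    #include <queue>

    // Sketch of the two-phase pattern: state changes and "updatestart" happen
    // synchronously; the removal itself and "update"/"updateend" run later from
    // a queued task, mirroring m_removeTimer.startOneShot(0).
    struct BufferSketch {
        bool updating { false };
        std::queue<std::function<void()>> tasks; // stands in for the zero-delay timer

        void remove(double start, double end)
        {
            updating = true;
            printf("event: updatestart\n");
            tasks.push([this, start, end] {
                printf("removing coded frames in [%g, %g)\n", start, end);
                updating = false;
                printf("event: update\nevent: updateend\n");
            });
            // Control returns to the caller here, before any removal happens.
        }

        void runPendingTasks()
        {
            while (!tasks.empty()) {
                tasks.front()();
                tasks.pop();
            }
        }
    };

    int main()
    {
        BufferSketch buffer;
        buffer.remove(0, 10);
        printf("updating after remove() returns: %s\n", buffer.updating ? "true" : "false");
        buffer.runPendingTasks();
        return 0;
    }

This is also why the new step 3 of abort() checks `m_removeTimer.isActive()`: while the queued removal is pending, the buffer is mid-update and abort() must refuse to run.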
 
 void SourceBuffer::abortIfUpdating()
 {
-    // Section 3.2 abort() method step 3 substeps.
-    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
+    // Section 3.2 abort() method step 4 substeps.
+    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
     if (!m_updating)
         return;
 
-    // 3.1. Abort the buffer append and stream append loop algorithms if they are running.
+    // 4.1. Abort the buffer append algorithm if it is running.
     m_appendBufferTimer.stop();
     m_pendingAppendData.clear();
+    m_private->abort();
 
-    // 3.2. Set the updating attribute to false.
+    // 4.2. Set the updating attribute to false.
     m_updating = false;
 
-    // 3.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
+    // 4.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
     scheduleEvent(eventNames().abortEvent);
 
-    // 3.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
+    // 4.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
     scheduleEvent(eventNames().updateendEvent);
 }
 
-void SourceBuffer::removedFromMediaSource()
+MediaTime SourceBuffer::highestPresentationTimestamp() const
 {
-    if (isRemoved())
-        return;
-
-    m_private->removedFromMediaSource();
-    m_source = 0;
-    m_asyncEventQueue.close();
+    MediaTime highestTime;
+    for (auto& trackBuffer : m_trackBufferMap.values()) {
+        auto lastSampleIter = trackBuffer.samples.presentationOrder().rbegin();
+        if (lastSampleIter == trackBuffer.samples.presentationOrder().rend())
+            continue;
+        highestTime = std::max(highestTime, lastSampleIter->first);
+    }
+    return highestTime;
 }
 
-void SourceBuffer::sourceBufferPrivateSeekToTime(SourceBufferPrivate*, const MediaTime& time)
+void SourceBuffer::readyStateChanged()
 {
-    LOG(Media, "SourceBuffer::sourceBufferPrivateSeekToTime(%p)", this);
-
-    for (auto trackBufferIterator = m_trackBufferMap.begin(); trackBufferIterator != m_trackBufferMap.end(); ++trackBufferIterator) {
-        TrackBuffer& trackBuffer = trackBufferIterator->value;
-        AtomicString trackID = trackBufferIterator->key;
-
-        // Find the sample which contains the current presentation time.
-        auto currentSamplePTSIterator = trackBuffer.samples.findSampleContainingPresentationTime(time);
+    updateBufferedFromTrackBuffers();
+}
 
-        if (currentSamplePTSIterator == trackBuffer.samples.presentationEnd()) {
-            trackBuffer.decodeQueue.clear();
-            m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
-            continue;
-        }
+void SourceBuffer::removedFromMediaSource()
+{
+    if (isRemoved())
+        return;
 
-        // Seach backward for the previous sync sample.
-        MediaTime currentSampleDecodeTime = currentSamplePTSIterator->second->decodeTime();
-        auto currentSampleDTSIterator = trackBuffer.samples.findSampleWithDecodeTime(currentSampleDecodeTime);
-        ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeEnd());
+    abortIfUpdating();
 
-        auto reverseCurrentSampleIter = --SampleMap::reverse_iterator(currentSampleDTSIterator);
-        auto reverseLastSyncSampleIter = trackBuffer.samples.findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
-        if (reverseLastSyncSampleIter == trackBuffer.samples.reverseDecodeEnd()) {
-            trackBuffer.decodeQueue.clear();
-            m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
-            continue;
-        }
+    for (auto& trackBufferPair : m_trackBufferMap.values()) {
+        trackBufferPair.samples.clear();
+        trackBufferPair.decodeQueue.clear();
+    }
 
-        Vector<RefPtr<MediaSample>> nonDisplayingSamples;
-        for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
-            nonDisplayingSamples.append(iter->second);
+    m_private->removedFromMediaSource();
+    m_source = nullptr;
+}
 
-        m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);
+void SourceBuffer::seekToTime(const MediaTime& time)
+{
+    LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());
 
-        // Fill the decode queue with the remaining samples.
-        trackBuffer.decodeQueue.clear();
-        for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeEnd(); ++iter)
-            trackBuffer.decodeQueue.insert(*iter);
+    for (auto& trackBufferPair : m_trackBufferMap) {
+        TrackBuffer& trackBuffer = trackBufferPair.value;
+        const AtomicString& trackID = trackBufferPair.key;
 
-        provideMediaData(trackBuffer, trackID);
+        trackBuffer.needsReenqueueing = true;
+        reenqueueMediaForTime(trackBuffer, trackID, time);
     }
 }
 
-MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
+MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
 {
     MediaTime seekTime = targetTime;
     MediaTime lowerBoundTime = targetTime - negativeThreshold;
     MediaTime upperBoundTime = targetTime + positiveThreshold;
 
-    for (auto trackBufferIterator = m_trackBufferMap.begin(); trackBufferIterator != m_trackBufferMap.end(); ++trackBufferIterator) {
-        TrackBuffer& trackBuffer = trackBufferIterator->value;
-
+    for (auto& trackBuffer : m_trackBufferMap.values()) {
         // Find the sample which contains the target time time.
-        auto futureSyncSampleIterator = trackBuffer.samples.findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
-        auto pastSyncSampleIterator = trackBuffer.samples.findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
-        auto upperBound = trackBuffer.samples.decodeEnd();
-        auto lowerBound = trackBuffer.samples.reverseDecodeEnd();
+        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
+        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
+        auto upperBound = trackBuffer.samples.decodeOrder().end();
+        auto lowerBound = trackBuffer.samples.decodeOrder().rend();
 
         if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
             continue;
@@ -331,12 +456,23 @@ MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBuffer
 
 bool SourceBuffer::hasPendingActivity() const
 {
-    return m_source;
+    return m_source || m_asyncEventQueue.hasPendingEvents();
 }
 
 void SourceBuffer::stop()
 {
     m_appendBufferTimer.stop();
+    m_removeTimer.stop();
+}
+
+bool SourceBuffer::canSuspendForDocumentSuspension() const
+{
+    return !hasPendingActivity();
+}
+
+const char* SourceBuffer::activeDOMObjectName() const
+{
+    return "SourceBuffer";
 }
 
@@ -346,13 +482,13 @@ bool SourceBuffer::isRemoved() const
 
 void SourceBuffer::scheduleEvent(const AtomicString& eventName)
 {
-    RefPtr<Event> event = Event::create(eventName, false, false);
+    auto event = Event::create(eventName, false, false);
     event->setTarget(this);
 
-    m_asyncEventQueue.enqueueEvent(event.release());
+    m_asyncEventQueue.enqueueEvent(WTFMove(event));
 }
 
-void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::appendBufferInternal(const unsigned char* data, unsigned size)
 {
     // Section 3.2 appendBuffer()
     // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
@@ -364,10 +500,8 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
     // 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
     //    then throw an INVALID_STATE_ERR exception and abort these steps.
     // 2. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
-    if (isRemoved() || m_updating) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
+    if (isRemoved() || m_updating)
+        return Exception { INVALID_STATE_ERR };
 
     // 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
     // 3.1. Set the readyState attribute of the parent media source to "open"
@@ -375,13 +509,16 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
     m_source->openIfInEndedState();
 
     // 4. Run the coded frame eviction algorithm.
-    m_private->evictCodedFrames();
+    evictCodedFrames(size);
 
+    // FIXME: enable this code when MSE libraries have been updated to support it.
+#if USE(GSTREAMER)
     // 5. If the buffer full flag equals true, then throw a QUOTA_EXCEEDED_ERR exception and abort these step.
-    if (m_private->isFull()) {
-        ec = QUOTA_EXCEEDED_ERR;
-        return;
+    if (m_bufferFull) {
+        LOG(MediaSource, "SourceBuffer::appendBufferInternal(%p) - buffer full, failing with QUOTA_EXCEEDED_ERR error", this);
+        return Exception { QUOTA_EXCEEDED_ERR };
     }
+#endif
 
     // NOTE: Return to 3.2 appendBuffer()
     // 3. Add data to the end of the input buffer.
@@ -395,10 +532,17 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
     // 6. Asynchronously run the buffer append algorithm.
     m_appendBufferTimer.startOneShot(0);
+
+    reportExtraMemoryAllocated();
+
+    return { };
 }
 
-void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
+void SourceBuffer::appendBufferTimerFired()
 {
+    if (isRemoved())
+        return;
+
     ASSERT(m_updating);
 
     // Section 3.5.5 Buffer Append Algorithm
@@ -417,31 +561,44 @@ void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
     // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
     // When the segment parser loop algorithm is invoked, run the following steps:
 
-    SourceBufferPrivate::AppendResult result = SourceBufferPrivate::AppendSucceeded;
-    do {
-        // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
-        if (!m_pendingAppendData.size())
-            break;
+    // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
+    if (!m_pendingAppendData.size()) {
+        sourceBufferPrivateAppendComplete(AppendSucceeded);
+        return;
+    }
 
-        result = m_private->append(m_pendingAppendData.data(), appendSize);
-        m_pendingAppendData.clear();
+    m_private->append(m_pendingAppendData.data(), appendSize);
+    m_pendingAppendData.clear();
+}
 
-        // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
-        //    then run the end of stream algorithm with the error parameter set to "decode" and abort this algorithm.
-        if (result == SourceBufferPrivate::ParsingFailed) {
-            m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
-            break;
-        }
+void SourceBuffer::sourceBufferPrivateAppendComplete(AppendResult result)
+{
+    if (isRemoved())
+        return;
 
-        // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
-        // sourceBufferPrivateDidReceiveSample below.
+    // Resolve the changes in TrackBuffers' buffered ranges
+    // into the SourceBuffer's buffered ranges
+    updateBufferedFromTrackBuffers();
 
-        // 7. Need more data: Return control to the calling algorithm.
-    } while (0);
+    // Section 3.5.5 Buffer Append Algorithm, ctd.
+    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append
+
+    // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
+    //    then run the append error algorithm with the decode error parameter set to true and abort this algorithm.
+    if (result == ParsingFailed) {
+        LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - result = ParsingFailed", this);
+        appendError(true);
+        return;
+    }
+
+    // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
+    // sourceBufferPrivateDidReceiveSample below.
+
+    // 7. Need more data: Return control to the calling algorithm.
 
     // NOTE: return to Section 3.5.5
     // 2.If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
-    if (result != SourceBufferPrivate::AppendSucceeded)
+    if (result != AppendSucceeded)
         return;
 
     // 3. Set the updating attribute to false.
@@ -453,54 +610,364 @@ void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
     // 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
     scheduleEvent(eventNames().updateendEvent);
 
-    m_source->monitorSourceBuffers();
-    for (auto iter = m_trackBufferMap.begin(), end = m_trackBufferMap.end(); iter != end; ++iter)
-        provideMediaData(iter->value, iter->key);
+    if (m_source)
+        m_source->monitorSourceBuffers();
+
+    MediaTime currentMediaTime = m_source->currentTime();
+    for (auto& trackBufferPair : m_trackBufferMap) {
+        TrackBuffer& trackBuffer = trackBufferPair.value;
+        const AtomicString& trackID = trackBufferPair.key;
+
+        if (trackBuffer.needsReenqueueing) {
+            LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
+            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
+        } else
+            provideMediaData(trackBuffer, trackID);
+    }
+
+    reportExtraMemoryAllocated();
+    if (extraMemoryCost() > this->maximumBufferSize())
+        m_bufferFull = true;
+
+    LOG(Media, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
 }
 
-const AtomicString& SourceBuffer::decodeError()
+void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(int error)
 {
-    static NeverDestroyed<AtomicString> decode("decode", AtomicString::ConstructFromLiteral);
-    return decode;
+#if LOG_DISABLED
+    UNUSED_PARAM(error);
+#endif
+
+    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(%p) - result = %i", this, error);
+
+    if (!isRemoved())
+        m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
 }
 
-const AtomicString& SourceBuffer::networkError()
+static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
 {
-    static NeverDestroyed<AtomicString> network("network", AtomicString::ConstructFromLiteral);
-    return network;
+    return a.second->decodeTime() < b.second->decodeTime();
 }
 
-VideoTrackList* SourceBuffer::videoTracks()
+static PlatformTimeRanges removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
 {
-    if (!m_source->mediaElement())
-        return nullptr;
+#if !LOG_DISABLED
+    MediaTime earliestSample = MediaTime::positiveInfiniteTime();
+    MediaTime latestSample = MediaTime::zeroTime();
+    size_t bytesRemoved = 0;
+#else
+    UNUSED_PARAM(logPrefix);
+    UNUSED_PARAM(buffer);
+#endif
 
-    if (!m_videoTracks)
-        m_videoTracks = VideoTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+    PlatformTimeRanges erasedRanges;
+    for (auto sampleIt : samples) {
+        const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
+#if !LOG_DISABLED
+        size_t startBufferSize = trackBuffer.samples.sizeInBytes();
+#endif
+
+        RefPtr<MediaSample>& sample = sampleIt.second;
+        LOG(MediaSource, "SourceBuffer::%s(%p) - removing sample(%s)", logPrefix, buffer, toString(*sampleIt.second).utf8().data());
+
+        // Remove the erased samples from the TrackBuffer sample map.
+        trackBuffer.samples.removeSample(sample.get());
+
+        // Also remove the erased samples from the TrackBuffer decodeQueue.
+        trackBuffer.decodeQueue.erase(decodeKey);
+
+        auto startTime = sample->presentationTime();
+        auto endTime = startTime + sample->duration();
+        erasedRanges.add(startTime, endTime);
+
+#if !LOG_DISABLED
+        bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
+        if (startTime < earliestSample)
+            earliestSample = startTime;
+        if (endTime > latestSample)
+            latestSample = endTime;
+#endif
+    }
+
+    // Because we may have added artificial padding in the buffered ranges when adding samples, we may
+    // need to remove that padding when removing those same samples. Walk over the erased ranges looking
+    // for unbuffered areas and expand erasedRanges to encompass those areas.
+    PlatformTimeRanges additionalErasedRanges;
+    for (unsigned i = 0; i < erasedRanges.length(); ++i) {
+        auto erasedStart = erasedRanges.start(i);
+        auto erasedEnd = erasedRanges.end(i);
+        auto startIterator = trackBuffer.samples.presentationOrder().reverseFindSampleBeforePresentationTime(erasedStart);
+        if (startIterator == trackBuffer.samples.presentationOrder().rend())
+            additionalErasedRanges.add(MediaTime::zeroTime(), erasedStart);
+        else {
+            auto& previousSample = *startIterator->second;
+            if (previousSample.presentationTime() + previousSample.duration() < erasedStart)
+                additionalErasedRanges.add(previousSample.presentationTime() + previousSample.duration(), erasedStart);
+        }
+
+        auto endIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(erasedEnd);
+        if (endIterator == trackBuffer.samples.presentationOrder().end())
+            additionalErasedRanges.add(erasedEnd, MediaTime::positiveInfiniteTime());
+        else {
+            auto& nextSample = *endIterator->second;
+            if (nextSample.presentationTime() > erasedEnd)
+                additionalErasedRanges.add(erasedEnd, nextSample.presentationTime());
+        }
+    }
+    if (additionalErasedRanges.length())
+        erasedRanges.unionWith(additionalErasedRanges);
+
+#if !LOG_DISABLED
+    if (bytesRemoved)
+        LOG(MediaSource, "SourceBuffer::%s(%p) removed %zu bytes, start(%lf), end(%lf)", logPrefix, buffer, bytesRemoved, earliestSample.toDouble(), latestSample.toDouble());
+#endif
+
+    return erasedRanges;
+}
+
+void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
+{
+    LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());
+
+    // 3.5.9 Coded Frame Removal Algorithm
+    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal
+
+    // 1. Let start be the starting presentation timestamp for the removal range.
+    MediaTime durationMediaTime = m_source->duration();
+    MediaTime currentMediaTime = m_source->currentTime();
+
+    // 2. Let end be the end presentation timestamp for the removal range.
+    // 3. For each track buffer in this source buffer, run the following steps:
+    for (auto& trackBuffer : m_trackBufferMap.values()) {
+        // 3.1. Let remove end timestamp be the current value of duration
+        // 3.2 If this track buffer has a random access point timestamp that is greater than or equal to end, then update
+        //     remove end timestamp to that random access point timestamp.
+
+        // NOTE: To handle MediaSamples which may be an amalgamation of multiple shorter samples, find samples whose presentation
+        // interval straddles the start and end times, and divide them if possible:
+        auto divideSampleIfPossibleAtPresentationTime = [&] (const MediaTime& time) {
+            auto sampleIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
+            if (sampleIterator == trackBuffer.samples.presentationOrder().end())
+                return;
+            RefPtr<MediaSample> sample = sampleIterator->second;
+            if (!sample->isDivisable())
+                return;
+            std::pair<RefPtr<MediaSample>, RefPtr<MediaSample>> replacementSamples = sample->divide(time);
+            if (!replacementSamples.first || !replacementSamples.second)
+                return;
+            LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - splitting sample (%s) into\n\t(%s)\n\t(%s)", this,
+                toString(sample).utf8().data(),
+                toString(replacementSamples.first).utf8().data(),
+                toString(replacementSamples.second).utf8().data());
+            trackBuffer.samples.removeSample(sample.get());
+            trackBuffer.samples.addSample(*replacementSamples.first);
+            trackBuffer.samples.addSample(*replacementSamples.second);
+        };
+        divideSampleIfPossibleAtPresentationTime(start);
+        divideSampleIfPossibleAtPresentationTime(end);
+
+        // NOTE: findSyncSampleAfterPresentationTime will return the next sync sample on or after the presentation time
+        // or decodeOrder().end() if no sync sample exists after that presentation time.
+        DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
+        PresentationOrderSampleMap::iterator removePresentationEnd;
+        if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
+            removePresentationEnd = trackBuffer.samples.presentationOrder().end();
+        else
+            removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());
+
+        PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(start);
+        if (removePresentationStart == removePresentationEnd)
+            continue;
+
+        // 3.3 Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
+        //     start and less than the remove end timestamp.
+        // NOTE: frames must be removed in decode order, so that all dependent frames between the frame to be removed
+        // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
+        // presentation order.
+        PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
+        DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
+        DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
+
+        DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
+        PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");
+
+        // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
+        // not yet displayed samples.
+        if (trackBuffer.lastEnqueuedPresentationTime.isValid() && currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
+            PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
+            possiblyEnqueuedRanges.intersectWith(erasedRanges);
+            if (possiblyEnqueuedRanges.length())
+                trackBuffer.needsReenqueueing = true;
+        }
+
+        erasedRanges.invert();
+        trackBuffer.buffered.intersectWith(erasedRanges);
+        setBufferedDirty(true);
+
+        // 3.4 If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
+        //     and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
+        //     the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
+        if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
+            m_private->setReadyState(MediaPlayer::HaveMetadata);
+    }
+
+    updateBufferedFromTrackBuffers();
+
+    // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
+    // No-op
 
-    return m_videoTracks.get();
+    LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
 }
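The `erasedRanges.invert()` / `intersectWith()` pair near the end of removeCodedFrames() above subtracts the erased ranges from the track's buffered ranges: subtracting a set of ranges is equivalent to intersecting with its complement. A standalone sketch of that identity on a toy interval list — not WebKit's `PlatformTimeRanges`; the complement here is bounded by an arbitrary limit rather than infinity:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using Ranges = std::vector<std::pair<double, double>>; // sorted, disjoint [start, end)

    // Complement within [0, limit): the invert() step.
    static Ranges invert(const Ranges& ranges, double limit)
    {
        Ranges result;
        double cursor = 0;
        for (auto& range : ranges) {
            if (range.first > cursor)
                result.push_back({ cursor, range.first });
            cursor = std::max(cursor, range.second);
        }
        if (cursor < limit)
            result.push_back({ cursor, limit });
        return result;
    }

    // Pairwise intersection of two sorted, disjoint range lists.
    static Ranges intersect(const Ranges& a, const Ranges& b)
    {
        Ranges result;
        for (auto& x : a) {
            for (auto& y : b) {
                double start = std::max(x.first, y.first);
                double end = std::min(x.second, y.second);
                if (start < end)
                    result.push_back({ start, end });
            }
        }
        return result;
    }

    int main()
    {
        Ranges buffered { { 0, 30 } };
        Ranges erased { { 10, 20 } };
        // buffered minus erased == buffered intersected with complement(erased);
        // prints [0, 10) and [20, 30).
        for (auto& range : intersect(buffered, invert(erased, 1000)))
            printf("[%g, %g)\n", range.first, range.second);
        return 0;
    }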
 
-AudioTrackList* SourceBuffer::audioTracks()
+void SourceBuffer::removeTimerFired()
 {
-    if (!m_source->mediaElement())
-        return nullptr;
+    if (isRemoved())
+        return;
 
-    if (!m_audioTracks)
-        m_audioTracks = AudioTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+    ASSERT(m_updating);
+    ASSERT(m_pendingRemoveStart.isValid());
+    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);
+
+    // Section 3.5.7 Range Removal
+    // http://w3c.github.io/media-source/#sourcebuffer-range-removal
+
+    // 6. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
+    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);
 
-    return m_audioTracks.get();
+    // 7. Set the updating attribute to false.
+    m_updating = false;
+    m_pendingRemoveStart = MediaTime::invalidTime();
+    m_pendingRemoveEnd = MediaTime::invalidTime();
+
+    // 8. Queue a task to fire a simple event named update at this SourceBuffer object.
+    scheduleEvent(eventNames().updateEvent);
+
+    // 9. Queue a task to fire a simple event named updateend at this SourceBuffer object.
+    scheduleEvent(eventNames().updateendEvent);
 }
 
-TextTrackList* SourceBuffer::textTracks()
+void SourceBuffer::evictCodedFrames(size_t newDataSize)
 {
-    if (!m_source->mediaElement())
-        return nullptr;
+    // 3.5.13 Coded Frame Eviction Algorithm
+    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction
 
-    if (!m_textTracks)
-        m_textTracks = TextTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+    if (isRemoved())
+        return;
+
+    // This algorithm is run to free up space in this source buffer when new data is appended.
+    // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
+    // 2. If the buffer full flag equals false, then abort these steps.
+    if (!m_bufferFull)
+        return;
+
+    size_t maximumBufferSize = this->maximumBufferSize();
+
+    // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
+    //    the presentation to make room for the new data.
+
+    // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
+    // a time, up to 30 seconds before currentTime.
+    MediaTime thirtySeconds = MediaTime(30, 1);
+    MediaTime currentTime = m_source->currentTime();
+    MediaTime maximumRangeEnd = currentTime - thirtySeconds;
+
+#if !LOG_DISABLED
+    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - currentTime = %lf, require %zu bytes, maximum buffer size is %zu", this, m_source->currentTime().toDouble(), extraMemoryCost() + newDataSize, maximumBufferSize);
+    size_t initialBufferedSize = extraMemoryCost();
+#endif
+
+    MediaTime rangeStart = MediaTime::zeroTime();
+    MediaTime rangeEnd = rangeStart + thirtySeconds;
+    while (rangeStart < maximumRangeEnd) {
+        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
+        //    end equal to the removal range start and end timestamp respectively.
+        removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
+        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
+            m_bufferFull = false;
+            break;
+        }
+
+        rangeStart += thirtySeconds;
+        rangeEnd += thirtySeconds;
+    }
+
+    if (!m_bufferFull) {
+        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes", this, initialBufferedSize - extraMemoryCost());
+        return;
+    }
+
+    // If there still isn't enough free space and there are buffers in time ranges after the current range (i.e. there is a gap after
+    // the current buffered range), delete 30 seconds at a time from duration back to the current time range or 30 seconds after
+    // currentTime, whichever we hit first.
+    auto buffered = m_buffered->ranges();
+    size_t currentTimeRange = buffered.find(currentTime);
+    if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
+        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes but FAILED to free enough", this, initialBufferedSize - extraMemoryCost());
+        return;
+    }
+
+    MediaTime minimumRangeStart = currentTime + thirtySeconds;
+
+    rangeEnd = m_source->duration();
+    rangeStart = rangeEnd - thirtySeconds;
+    while (rangeStart > minimumRangeStart) {
+
+        // Do not evict data from the time range that contains currentTime.
+        size_t startTimeRange = buffered.find(rangeStart);
+        if (startTimeRange == currentTimeRange) {
+            size_t endTimeRange = buffered.find(rangeEnd);
+            if (endTimeRange == currentTimeRange)
+                break;
+
+            rangeEnd = buffered.start(endTimeRange);
+        }
+
+        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
+        //    end equal to the removal range start and end timestamp respectively.
+        removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
+        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
+            m_bufferFull = false;
+            break;
+        }
+
+        rangeStart -= thirtySeconds;
+        rangeEnd -= thirtySeconds;
+    }
+
+    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough");
+}
+
+size_t SourceBuffer::maximumBufferSize() const
+{
+    if (isRemoved())
+        return 0;
+
+    auto* element = m_source->mediaElement();
+    if (!element)
+        return 0;
+
+    return element->maximumSourceBufferSize(*this);
+}
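evictCodedFrames() above frees space in fixed 30-second windows: it first sweeps forward from time zero, stopping 30 seconds short of the playhead, and only if still over budget sweeps backward from the duration toward 30 seconds after the playhead. A standalone simulation of the forward sweep with made-up sizes and times (not WebKit code):

    #include <algorithm>
    #include <cstdio>

    // Simulation of the forward eviction sweep: drop buffered seconds in
    // 30-second windows, never closer than 30 seconds behind the playhead,
    // until the projected size fits the budget.
    int main()
    {
        const double window = 30;
        const double currentTime = 120;
        const double maximumRangeEnd = currentTime - window; // stop 30s before playhead
        const double bytesPerSecond = 1000;                  // hypothetical bitrate
        const size_t maximumBufferSize = 50000;              // hypothetical quota
        const size_t incomingBytes = 20000;                  // size of the pending append

        double bufferedStart = 0, bufferedEnd = 150; // one contiguous buffered range
        size_t bufferedBytes = static_cast<size_t>((bufferedEnd - bufferedStart) * bytesPerSecond);

        double rangeStart = 0, rangeEnd = window;
        while (rangeStart < maximumRangeEnd && bufferedBytes + incomingBytes >= maximumBufferSize) {
            double evictEnd = std::min(rangeEnd, maximumRangeEnd);
            double evicted = std::max(0.0, std::min(evictEnd, bufferedEnd) - std::max(rangeStart, bufferedStart));
            bufferedStart = std::max(bufferedStart, evictEnd);
            bufferedBytes -= static_cast<size_t>(evicted * bytesPerSecond);
            printf("evicted [%g, %g): %zu bytes still buffered\n", rangeStart, evictEnd, bufferedBytes);
            rangeStart += window;
            rangeEnd += window;
        }
        printf("buffer still full: %s\n", bufferedBytes + incomingBytes >= maximumBufferSize ? "yes" : "no");
        return 0;
    }

With these numbers the sweep stops at the playhead guard while still over budget, which corresponds to the "FAILED to free enough" logging path above.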
For each range in removal ranges, run the coded frame removal algorithm with start and + // end equal to the removal range start and end timestamp respectively. + removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd); + if (extraMemoryCost() + newDataSize < maximumBufferSize) { + m_bufferFull = false; + break; + } + + rangeStart -= thirtySeconds; + rangeEnd -= thirtySeconds; + } + + LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough"); +} + +size_t SourceBuffer::maximumBufferSize() const +{ + if (isRemoved()) + return 0; + + auto* element = m_source->mediaElement(); + if (!element) + return 0; + + return element->maximumSourceBufferSize(*this); +} + +VideoTrackList& SourceBuffer::videoTracks() +{ + if (!m_videoTracks) + m_videoTracks = VideoTrackList::create(m_source->mediaElement(), scriptExecutionContext()); + return *m_videoTracks; +} + +AudioTrackList& SourceBuffer::audioTracks() +{ + if (!m_audioTracks) + m_audioTracks = AudioTrackList::create(m_source->mediaElement(), scriptExecutionContext()); + return *m_audioTracks; +} + +TextTrackList& SourceBuffer::textTracks() +{ + if (!m_textTracks) + m_textTracks = TextTrackList::create(m_source->mediaElement(), scriptExecutionContext()); + return *m_textTracks; } void SourceBuffer::setActive(bool active) @@ -510,42 +977,86 @@ void SourceBuffer::setActive(bool active) m_active = active; m_private->setActive(active); - m_source->sourceBufferDidChangeAcitveState(this, active); + if (!isRemoved()) + m_source->sourceBufferDidChangeActiveState(*this, active); } -void SourceBuffer::sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString& error) +void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(const InitializationSegment& segment) { - m_source->endOfStream(error, IgnorableExceptionCode()); -} + if (isRemoved()) + return; + + LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(%p)", this); + + // 3.5.8 Initialization Segment Received (ctd) + // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015] -void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment) -{ - // 3.5.7 Initialization Segment Received - // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received // 1. Update the duration attribute if it currently equals NaN: - if (std::isnan(m_source->duration())) { + if (m_source->duration().isInvalid()) { // ↳ If the initialization segment contains a duration: // Run the duration change algorithm with new duration set to the duration in the initialization segment. // ↳ Otherwise: // Run the duration change algorithm with new duration set to positive Infinity. MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime(); - m_source->setDuration(newDuration.toDouble(), IGNORE_EXCEPTION); + m_source->setDurationInternal(newDuration); } - // 2. If the initialization segment has no audio, video, or text tracks, then run the end of stream - // algorithm with the error parameter set to "decode" and abort these steps. 
- if (!segment.audioTracks.size() && !segment.videoTracks.size() && !segment.textTracks.size()) - m_source->endOfStream(decodeError(), IgnorableExceptionCode()); - + // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm + // with the decode error parameter set to true and abort these steps. + if (segment.audioTracks.isEmpty() && segment.videoTracks.isEmpty() && segment.textTracks.isEmpty()) { + appendError(true); + return; + } // 3. If the first initialization segment flag is true, then run the following steps: if (m_receivedFirstInitializationSegment) { + + // 3.1. Verify the following properties. If any of the checks fail then run the append error algorithm + // with the decode error parameter set to true and abort these steps. if (!validateInitializationSegment(segment)) { - m_source->endOfStream(decodeError(), IgnorableExceptionCode()); + appendError(true); return; } // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers. - // NOTE: No changes to make + ASSERT(segment.audioTracks.size() == audioTracks().length()); + for (auto& audioTrackInfo : segment.audioTracks) { + if (audioTracks().length() == 1) { + audioTracks().item(0)->setPrivate(*audioTrackInfo.track); + break; + } + + auto audioTrack = audioTracks().getTrackById(audioTrackInfo.track->id()); + ASSERT(audioTrack); + audioTrack->setPrivate(*audioTrackInfo.track); + } + + ASSERT(segment.videoTracks.size() == videoTracks().length()); + for (auto& videoTrackInfo : segment.videoTracks) { + if (videoTracks().length() == 1) { + videoTracks().item(0)->setPrivate(*videoTrackInfo.track); + break; + } + + auto videoTrack = videoTracks().getTrackById(videoTrackInfo.track->id()); + ASSERT(videoTrack); + videoTrack->setPrivate(*videoTrackInfo.track); + } + + ASSERT(segment.textTracks.size() == textTracks().length()); + for (auto& textTrackInfo : segment.textTracks) { + if (textTracks().length() == 1) { + downcast<InbandTextTrack>(*textTracks().item(0)).setPrivate(*textTrackInfo.track); + break; + } + + auto textTrack = textTracks().getTrackById(textTrackInfo.track->id()); + ASSERT(textTrack); + downcast<InbandTextTrack>(*textTrack).setPrivate(*textTrackInfo.track); + } + + // 3.3 Set the need random access point flag on all track buffers to true. + for (auto& trackBuffer : m_trackBufferMap.values()) + trackBuffer.needRandomAccessFlag = true; } // 4. Let active track flag equal false. @@ -554,20 +1065,19 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff // 5. If the first initialization segment flag is false, then run the following steps: if (!m_receivedFirstInitializationSegment) { // 5.1 If the initialization segment contains tracks with codecs the user agent does not support, - // then run the end of stream algorithm with the error parameter set to "decode" and abort these steps. + // then run the append error algorithm with the decode error parameter set to true and abort these steps. // NOTE: This check is the responsibility of the SourceBufferPrivate. // 5.2 For each audio track in the initialization segment, run following steps: - for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) { - AudioTrackPrivate* audioTrackPrivate = it->track.get(); - + for (auto& audioTrackInfo : segment.audioTracks) { + // FIXME: Implement steps 5.2.1-5.2.8.1 as per Editor's Draft 09 January 2015, and reorder this // 5.2.1 Let new audio track be a new AudioTrack object. 
// 5.2.2 Generate a unique ID and assign it to the id property on new video track. - RefPtr<AudioTrack> newAudioTrack = AudioTrack::create(this, audioTrackPrivate); + auto newAudioTrack = AudioTrack::create(*this, *audioTrackInfo.track); newAudioTrack->setSourceBuffer(this); // 5.2.3 If audioTracks.length equals 0, then run the following steps: - if (!audioTracks()->length()) { + if (!audioTracks().length()) { // 5.2.3.1 Set the enabled property on new audio track to true. newAudioTrack->setEnabled(true); @@ -579,33 +1089,34 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff // 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object // referenced by the audioTracks attribute on this SourceBuffer object. - audioTracks()->append(newAudioTrack); + audioTracks().append(newAudioTrack.copyRef()); // 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement. // 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object // referenced by the audioTracks attribute on the HTMLMediaElement. - m_source->mediaElement()->audioTracks()->append(newAudioTrack); + m_source->mediaElement()->audioTracks().append(newAudioTrack.copyRef()); // 5.2.8 Create a new track buffer to store coded frames for this track. ASSERT(!m_trackBufferMap.contains(newAudioTrack->id())); - TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value; + auto& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value; // 5.2.9 Add the track description for this track to the track buffer. - trackBuffer.description = it->description; + trackBuffer.description = audioTrackInfo.description; + + m_audioCodecs.append(trackBuffer.description->codec()); } // 5.3 For each video track in the initialization segment, run following steps: - for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) { - VideoTrackPrivate* videoTrackPrivate = it->track.get(); - + for (auto& videoTrackInfo : segment.videoTracks) { + // FIXME: Implement steps 5.3.1-5.3.8.1 as per Editor's Draft 09 January 2015, and reorder this // 5.3.1 Let new video track be a new VideoTrack object. // 5.3.2 Generate a unique ID and assign it to the id property on new video track. - RefPtr<VideoTrack> newVideoTrack = VideoTrack::create(this, videoTrackPrivate); + auto newVideoTrack = VideoTrack::create(*this, *videoTrackInfo.track); newVideoTrack->setSourceBuffer(this); // 5.3.3 If videoTracks.length equals 0, then run the following steps: - if (!videoTracks()->length()) { + if (!videoTracks().length()) { // 5.3.3.1 Set the selected property on new video track to true. newVideoTrack->setSelected(true); @@ -617,58 +1128,64 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff // 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object // referenced by the videoTracks attribute on this SourceBuffer object. - videoTracks()->append(newVideoTrack); + videoTracks().append(newVideoTrack.copyRef()); // 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement. 
// 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object // referenced by the videoTracks attribute on the HTMLMediaElement. - m_source->mediaElement()->videoTracks()->append(newVideoTrack); + m_source->mediaElement()->videoTracks().append(newVideoTrack.copyRef()); // 5.3.8 Create a new track buffer to store coded frames for this track. ASSERT(!m_trackBufferMap.contains(newVideoTrack->id())); - TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value; + auto& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value; // 5.3.9 Add the track description for this track to the track buffer. - trackBuffer.description = it->description; + trackBuffer.description = videoTrackInfo.description; + + m_videoCodecs.append(trackBuffer.description->codec()); } // 5.4 For each text track in the initialization segment, run following steps: - for (auto it = segment.textTracks.begin(); it != segment.textTracks.end(); ++it) { - InbandTextTrackPrivate* textTrackPrivate = it->track.get(); + for (auto& textTrackInfo : segment.textTracks) { + auto& textTrackPrivate = *textTrackInfo.track; + // FIXME: Implement steps 5.4.1-5.4.8.1 as per Editor's Draft 09 January 2015, and reorder this // 5.4.1 Let new text track be a new TextTrack object with its properties populated with the // appropriate information from the initialization segment. - RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate); + auto newTextTrack = InbandTextTrack::create(*scriptExecutionContext(), *this, textTrackPrivate); // 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active // track flag to true. - if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled) + if (textTrackPrivate.mode() != InbandTextTrackPrivate::Disabled) activeTrackFlag = true; // 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object. // 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this // SourceBuffer object. - textTracks()->append(newTextTrack); + textTracks().append(newTextTrack.get()); // 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement. // 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is // not cancelable, and that uses the TrackEvent interface, at the TextTrackList object // referenced by the textTracks attribute on the HTMLMediaElement. - m_source->mediaElement()->textTracks()->append(newTextTrack); + m_source->mediaElement()->textTracks().append(WTFMove(newTextTrack)); // 5.4.7 Create a new track buffer to store coded frames for this track. - ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id())); - TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value; + ASSERT(!m_trackBufferMap.contains(textTrackPrivate.id())); + auto& trackBuffer = m_trackBufferMap.add(textTrackPrivate.id(), TrackBuffer()).iterator->value; // 5.4.8 Add the track description for this track to the track buffer. 
- trackBuffer.description = it->description; + trackBuffer.description = textTrackInfo.description; + + m_textCodecs.append(trackBuffer.description->codec()); } // 5.5 If active track flag equals true, then run the following steps: if (activeTrackFlag) { // 5.5.1 Add this SourceBuffer to activeSourceBuffers. + // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers setActive(true); } @@ -679,8 +1196,8 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff // 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps: if (m_private->readyState() == MediaPlayer::HaveNothing) { // 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps. - for (unsigned long i = 0; i < m_source->sourceBuffers()->length(); ++i) { - if (!m_source->sourceBuffers()->item(i)->m_receivedFirstInitializationSegment) + for (auto& sourceBuffer : *m_source->sourceBuffers()) { + if (!sourceBuffer->m_receivedFirstInitializationSegment) return; } @@ -698,52 +1215,52 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment) { - // 3.5.7 Initialization Segment Received (ctd) - // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received + // FIXME: ordering of all 3.5.X (X>=7) functions needs to be updated to post-[24 July 2014 Editor's Draft] version + // 3.5.8 Initialization Segment Received (ctd) + // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015] - // 3.1. Verify the following properties. If any of the checks fail then run the end of stream - // algorithm with the error parameter set to "decode" and abort these steps. + // Note: those are checks from step 3.1 // * The number of audio, video, and text tracks match what was in the first initialization segment. - if (segment.audioTracks.size() != audioTracks()->length() - || segment.videoTracks.size() != videoTracks()->length() - || segment.textTracks.size() != textTracks()->length()) + if (segment.audioTracks.size() != audioTracks().length() + || segment.videoTracks.size() != videoTracks().length() + || segment.textTracks.size() != textTracks().length()) return false; // * The codecs for each track, match what was specified in the first initialization segment. - for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) { - if (!m_videoCodecs.contains(it->description->codec())) + for (auto& audioTrackInfo : segment.audioTracks) { + if (!m_audioCodecs.contains(audioTrackInfo.description->codec())) return false; } - for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) { - if (!m_audioCodecs.contains(it->description->codec())) + for (auto& videoTrackInfo : segment.videoTracks) { + if (!m_videoCodecs.contains(videoTrackInfo.description->codec())) return false; } - for (auto it = segment.textTracks.begin(); it != segment.textTracks.end(); ++it) { - if (!m_textCodecs.contains(it->description->codec())) + for (auto& textTrackInfo : segment.textTracks) { + if (!m_textCodecs.contains(textTrackInfo.description->codec())) return false; } // * If more than one track for a single type are present (ie 2 audio tracks), then the Track // IDs match the ones in the first initialization segment. 
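Restated compactly, these rules pin every later initialization segment to the shape of the first one. The hunks below apply them in place over the real track lists; the following is only a self-contained sketch of the same checks, using hypothetical TrackInfo and FirstSegmentShape types (plain C++, not WebKit API):

    #include <cstddef>
    #include <set>
    #include <string>
    #include <vector>

    // Illustrative stand-ins for the per-kind track info parsed from an init segment.
    struct TrackInfo { std::string id; std::string codec; };

    struct FirstSegmentShape {
        std::size_t trackCount;          // tracks of this kind in the first init segment
        std::set<std::string> codecs;    // codecs seen for this kind
        std::set<std::string> trackIDs;  // track IDs that already have track buffers
    };

    // Returns false if the new segment's tracks of one kind diverge from the first
    // segment: count mismatch, unknown codec, or (when multiple tracks of the kind
    // exist) a track ID with no existing track buffer.
    static bool validateKind(const std::vector<TrackInfo>& tracks, const FirstSegmentShape& first)
    {
        if (tracks.size() != first.trackCount)
            return false;
        for (const auto& track : tracks) {
            if (!first.codecs.count(track.codec))
                return false;
            if (tracks.size() >= 2 && !first.trackIDs.count(track.id))
                return false;
        }
        return true;
    }
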
if (segment.audioTracks.size() >= 2) { - for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) { - if (!m_trackBufferMap.contains(it->track->id())) + for (auto& audioTrackInfo : segment.audioTracks) { + if (!m_trackBufferMap.contains(audioTrackInfo.track->id())) return false; } } if (segment.videoTracks.size() >= 2) { - for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) { - if (!m_trackBufferMap.contains(it->track->id())) + for (auto& videoTrackInfo : segment.videoTracks) { + if (!m_trackBufferMap.contains(videoTrackInfo.track->id())) return false; } } if (segment.textTracks.size() >= 2) { - for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) { - if (!m_trackBufferMap.contains(it->track->id())) + for (auto& textTrackInfo : segment.textTracks) { + if (!m_trackBufferMap.contains(textTrackInfo.track->id())) return false; } } @@ -769,102 +1286,188 @@ public: } }; -void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample> prpSample) +void SourceBuffer::appendError(bool decodeErrorParam) { - RefPtr<MediaSample> sample = prpSample; + // 3.5.3 Append Error Algorithm + // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-append-error [Editor's Draft 09 January 2015] + + ASSERT(m_updating); + // 1. Run the reset parser state algorithm. + resetParserState(); + + // 2. Set the updating attribute to false. + m_updating = false; + + // 3. Queue a task to fire a simple event named error at this SourceBuffer object. + scheduleEvent(eventNames().errorEvent); + + // 4. Queue a task to fire a simple event named updateend at this SourceBuffer object. + scheduleEvent(eventNames().updateendEvent); + + // 5. If decode error is true, then run the end of stream algorithm with the error parameter set to "decode". + if (decodeErrorParam) + m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode); +} + +void SourceBuffer::sourceBufferPrivateDidReceiveSample(MediaSample& sample) +{ + if (isRemoved()) + return; + + // 3.5.1 Segment Parser Loop + // 6.1 If the first initialization segment received flag is false, then run the append error algorithm + // with the decode error parameter set to true and abort this algorithm. + // Note: the current design makes SourceBuffer somewhat ignorant of append state - that is more a concern + // of SourceBufferPrivate - so this check can't really be done in appendInternal + // unless we force some kind of design with state machine switching. + if (!m_receivedFirstInitializationSegment) { + appendError(true); + return; + } // 3.5.8 Coded Frame Processing + // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-processing + // When complete coded frames have been parsed by the segment parser loop then the following steps // are run: // 1. For each coded frame in the media segment run the following steps: // 1.1. Loop Top do { - // 1.1 (ctd) Let presentation timestamp be a double precision floating point representation of - // the coded frame's presentation timestamp in seconds. - MediaTime presentationTimestamp = sample->presentationTime(); - - // 1.2 Let decode timestamp be a double precision floating point representation of the coded frame's - // decode timestamp in seconds. - MediaTime decodeTimestamp = sample->decodeTime(); + MediaTime presentationTimestamp; + MediaTime decodeTimestamp; + + if (m_shouldGenerateTimestamps) { + // ↳ If generate timestamps flag equals true: + // 1.
Let presentation timestamp equal 0. + presentationTimestamp = MediaTime::zeroTime(); + + // 2. Let decode timestamp equal 0. + decodeTimestamp = MediaTime::zeroTime(); + } else { + // ↳ Otherwise: + // 1. Let presentation timestamp be a double precision floating point representation of + // the coded frame's presentation timestamp in seconds. + presentationTimestamp = sample.presentationTime(); + + // 2. Let decode timestamp be a double precision floating point representation of the coded frame's + // decode timestamp in seconds. + decodeTimestamp = sample.decodeTime(); + } - // 1.3 Let frame duration be a double precision floating point representation of the coded frame's + // 1.2 Let frame duration be a double precision floating point representation of the coded frame's // duration in seconds. - MediaTime frameDuration = sample->duration(); + MediaTime frameDuration = sample.duration(); + + // 1.3 If mode equals "sequence" and group start timestamp is set, then run the following steps: + if (m_mode == AppendMode::Sequence && m_groupStartTimestamp.isValid()) { + // 1.3.1 Set timestampOffset equal to group start timestamp - presentation timestamp. + m_timestampOffset = m_groupStartTimestamp; - // 1.4 If mode equals "sequence" and group start timestamp is set, then run the following steps: - // FIXME: add support for "sequence" mode + // 1.3.2 Set group end timestamp equal to group start timestamp. + m_groupEndTimestamp = m_groupStartTimestamp; - // 1.5 If timestampOffset is not 0, then run the following steps: - if (m_timestampOffset != MediaTime::zeroTime()) { - // 1.5.1 Add timestampOffset to the presentation timestamp. + // 1.3.3 Set the need random access point flag on all track buffers to true. + for (auto& trackBuffer : m_trackBufferMap.values()) + trackBuffer.needRandomAccessFlag = true; + + // 1.3.4 Unset group start timestamp. + m_groupStartTimestamp = MediaTime::invalidTime(); + } + + // 1.4 If timestampOffset is not 0, then run the following steps: + if (m_timestampOffset) { + // 1.4.1 Add timestampOffset to the presentation timestamp. presentationTimestamp += m_timestampOffset; - // 1.5.2 Add timestampOffset to the decode timestamp. + // 1.4.2 Add timestampOffset to the decode timestamp. decodeTimestamp += m_timestampOffset; - - // 1.5.3 If the presentation timestamp or decode timestamp is less than the presentation start - // time, then run the end of stream algorithm with the error parameter set to "decode", and - // abort these steps. - MediaTime presentationStartTime = MediaTime::zeroTime(); - if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime) { - m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode()); - return; - } } - // 1.6 Let track buffer equal the track buffer that the coded frame will be added to. - AtomicString trackID = sample->trackID(); + // 1.5 Let track buffer equal the track buffer that the coded frame will be added to. + AtomicString trackID = sample.trackID(); auto it = m_trackBufferMap.find(trackID); - if (it == m_trackBufferMap.end()) - it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator; + if (it == m_trackBufferMap.end()) { + // The client managed to append a sample with a trackID not present in the initialization + // segment. This would be a good place to post an message to the developer console. 
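Before the loop continues below, the timestamp bookkeeping of steps 1.1-1.4 above is worth restating end to end. A minimal sketch using plain doubles in place of MediaTime, with illustrative names that are not WebKit API; note that the patch assigns group start timestamp to timestampOffset directly, which coincides with the spec's "group start timestamp - presentation timestamp" only because generated presentation timestamps are zero at that point:

    #include <optional>

    struct AppendState {
        bool generateTimestamps { false };
        bool sequenceMode { false };
        std::optional<double> groupStartTimestamp;
        double groupEndTimestamp { 0 };
        double timestampOffset { 0 };
    };

    // Steps 1.1-1.4 of coded frame processing, compressed: zero the timestamps in
    // generate-timestamps mode, rebase the group in sequence mode, then shift both
    // timestamps by timestampOffset.
    static void adjustTimestamps(AppendState& s, double& pts, double& dts)
    {
        if (s.generateTimestamps)
            pts = dts = 0;
        if (s.sequenceMode && s.groupStartTimestamp) {
            s.timestampOffset = *s.groupStartTimestamp; // as in the patch; see note above
            s.groupEndTimestamp = *s.groupStartTimestamp;
            s.groupStartTimestamp.reset(); // the real code also re-arms each track's random access flag
        }
        pts += s.timestampOffset;
        dts += s.timestampOffset;
    }
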
+ didDropSample(); + return; + } TrackBuffer& trackBuffer = it->value; - // 1.7 If last decode timestamp for track buffer is set and decode timestamp is less than last + // 1.6 ↳ If last decode timestamp for track buffer is set and decode timestamp is less than last // decode timestamp: // OR - // If last decode timestamp for track buffer is set and the difference between decode timestamp and + // ↳ If last decode timestamp for track buffer is set and the difference between decode timestamp and // last decode timestamp is greater than 2 times last frame duration: if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp || abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) { - // 1.7.1 If mode equals "segments": - // Set highest presentation end timestamp to presentation timestamp. - m_highestPresentationEndTimestamp = presentationTimestamp; - - // If mode equals "sequence": - // Set group start timestamp equal to the highest presentation end timestamp. - // FIXME: Add support for "sequence" mode. - - for (auto i = m_trackBufferMap.values().begin(); i != m_trackBufferMap.values().end(); ++i) { - // 1.7.2 Unset the last decode timestamp on all track buffers. - i->lastDecodeTimestamp = MediaTime::invalidTime(); - // 1.7.3 Unset the last frame duration on all track buffers. - i->lastFrameDuration = MediaTime::invalidTime(); - // 1.7.4 Unset the highest presentation timestamp on all track buffers. - i->highestPresentationTimestamp = MediaTime::invalidTime(); - // 1.7.5 Set the need random access point flag on all track buffers to true. - i->needRandomAccessFlag = true; + + // 1.6.1: + if (m_mode == AppendMode::Segments) { + // ↳ If mode equals "segments": + // Set group end timestamp to presentation timestamp. + m_groupEndTimestamp = presentationTimestamp; + } else { + // ↳ If mode equals "sequence": + // Set group start timestamp equal to the group end timestamp. + m_groupStartTimestamp = m_groupEndTimestamp; } - // 1.7.6 Jump to the Loop Top step above to restart processing of the current coded frame. + for (auto& trackBuffer : m_trackBufferMap.values()) { + // 1.6.2 Unset the last decode timestamp on all track buffers. + trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime(); + // 1.6.3 Unset the last frame duration on all track buffers. + trackBuffer.lastFrameDuration = MediaTime::invalidTime(); + // 1.6.4 Unset the highest presentation timestamp on all track buffers. + trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime(); + // 1.6.5 Set the need random access point flag on all track buffers to true. + trackBuffer.needRandomAccessFlag = true; + } + + // 1.6.6 Jump to the Loop Top step above to restart processing of the current coded frame. continue; } - // 1.8 Let frame end timestamp equal the sum of presentation timestamp and frame duration. + if (m_mode == AppendMode::Sequence) { + // Use the generated timestamps instead of the sample's timestamps. + sample.setTimestamps(presentationTimestamp, decodeTimestamp); + } else if (m_timestampOffset) { + // Reflect the timestamp offset into the sample. + sample.offsetTimestampsBy(m_timestampOffset); + } + + // 1.7 Let frame end timestamp equal the sum of presentation timestamp and frame duration. 
MediaTime frameEndTimestamp = presentationTimestamp + frameDuration; - // 1.9 If presentation timestamp is less than appendWindowStart, then set the need random access + // 1.8 If presentation timestamp is less than appendWindowStart, then set the need random access // point flag to true, drop the coded frame, and jump to the top of the loop to start processing // the next coded frame. - // 1.10 If frame end timestamp is greater than appendWindowEnd, then set the need random access + // 1.9 If frame end timestamp is greater than appendWindowEnd, then set the need random access // point flag to true, drop the coded frame, and jump to the top of the loop to start processing // the next coded frame. - // FIXME: implement append windows + if (presentationTimestamp < m_appendWindowStart || frameEndTimestamp > m_appendWindowEnd) { + trackBuffer.needRandomAccessFlag = true; + didDropSample(); + return; + } + + + // 1.10 If the decode timestamp is less than the presentation start time, then run the end of stream + // algorithm with the error parameter set to "decode", and abort these steps. + // NOTE: Until <https://www.w3.org/Bugs/Public/show_bug.cgi?id=27487> is resolved, we will only check + // the presentation timestamp. + MediaTime presentationStartTime = MediaTime::zeroTime(); + if (presentationTimestamp < presentationStartTime) { + LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveSample(%p) - failing because presentationTimestamp < presentationStartTime", this); + m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode); + return; + } // 1.11 If the need random access point flag on track buffer equals true, then run the following steps: if (trackBuffer.needRandomAccessFlag) { // 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump // to the top of the loop to start processing the next coded frame. - if (!sample->isSync()) { + if (!sample.isSync()) { didDropSample(); return; } @@ -877,16 +1480,15 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas // 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information // FIXME: Add support for sample splicing. - SampleMap::MapType erasedSamples; + SampleMap erasedSamples; MediaTime microsecond(1, 1000000); - // 1.14 If last decode timestamp for track buffer is unset and there is a coded frame in - // track buffer with a presentation timestamp less than or equal to presentation timestamp - // and presentation timestamp is less than this coded frame's presentation timestamp plus - // its frame duration, then run the following steps: + // 1.14 If last decode timestamp for track buffer is unset and presentation timestamp falls + // within the presentation interval of a coded frame in track buffer, then run the + // following steps: if (trackBuffer.lastDecodeTimestamp.isInvalid()) { - auto iter = trackBuffer.samples.findSampleContainingPresentationTime(presentationTimestamp); - if (iter != trackBuffer.samples.presentationEnd()) { + auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp); + if (iter != trackBuffer.samples.presentationOrder().end()) { // 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
RefPtr<MediaSample> overlappedFrame = iter->second; @@ -908,7 +1510,7 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas // 1.14.2.3 If the presentation timestamp is less than the remove window timestamp, // then remove overlapped frame and any coded frames that depend on it from track buffer. if (presentationTimestamp < removeWindowTimestamp) - erasedSamples.insert(*iter); + erasedSamples.addSample(*iter->second); } // If track buffer contains timed text coded frames: @@ -922,52 +1524,64 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas if (trackBuffer.highestPresentationTimestamp.isInvalid()) { // Remove all coded frames from track buffer that have a presentation timestamp greater than or // equal to presentation timestamp and less than frame end timestamp. - auto iter_pair = trackBuffer.samples.findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp); - if (iter_pair.first != trackBuffer.samples.presentationEnd()) - erasedSamples.insert(iter_pair.first, iter_pair.second); + auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp); + if (iter_pair.first != trackBuffer.samples.presentationOrder().end()) + erasedSamples.addRange(iter_pair.first, iter_pair.second); } - // If highest presentation timestamp for track buffer is set and less than presentation timestamp - if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp < presentationTimestamp) { + // If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp + if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) { // Remove all coded frames from track buffer that have a presentation timestamp greater than highest // presentation timestamp and less than or equal to frame end timestamp. - auto iter_pair = trackBuffer.samples.findSamplesBetweenPresentationTimes(trackBuffer.highestPresentationTimestamp, frameEndTimestamp); - if (iter_pair.first != trackBuffer.samples.presentationEnd()) - erasedSamples.insert(iter_pair.first, iter_pair.second); + do { + // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is + // near the end of the buffered range. 
Use a linear-backwards search if the search range is within one + // frame duration of the end: + unsigned bufferedLength = trackBuffer.buffered.length(); + if (!bufferedLength) + break; + + MediaTime highestBufferedTime = trackBuffer.buffered.maximumBufferedTime(); + + PresentationOrderSampleMap::iterator_range range; + if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration) + range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp); + else + range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp); + + if (range.first != trackBuffer.samples.presentationOrder().end()) + erasedSamples.addRange(range.first, range.second); + } while(false); } // 1.16 Remove decoding dependencies of the coded frames removed in the previous step: - SampleMap::MapType dependentSamples; + DecodeOrderSampleMap::MapType dependentSamples; if (!erasedSamples.empty()) { // If detailed information about decoding dependencies is available: // FIXME: Add support for detailed dependency information // Otherwise: Remove all coded frames between the coded frames removed in the previous step // and the next random access point after those removed frames. - for (auto erasedIt = erasedSamples.begin(), end = erasedSamples.end(); erasedIt != end; ++erasedIt) { - auto currentDecodeIter = trackBuffer.samples.findSampleWithDecodeTime(erasedIt->second->decodeTime()); - auto nextSyncIter = trackBuffer.samples.findSyncSampleAfterDecodeIterator(currentDecodeIter); - dependentSamples.insert(currentDecodeIter, nextSyncIter); - } - - - RefPtr<TimeRanges> erasedRanges = TimeRanges::create(); - for (auto erasedIt = erasedSamples.begin(), end = erasedSamples.end(); erasedIt != end; ++erasedIt) { - double startTime = erasedIt->first.toDouble(); - double endTime = ((erasedIt->first + erasedIt->second->duration()) + microsecond).toDouble(); - erasedRanges->add(startTime, endTime); - trackBuffer.samples.removeSample(erasedIt->second.get()); - } - - for (auto dependentIt = dependentSamples.begin(), end = dependentSamples.end(); dependentIt != end; ++dependentIt) { - double startTime = dependentIt->first.toDouble(); - double endTime = ((dependentIt->first + dependentIt->second->duration()) + microsecond).toDouble(); - erasedRanges->add(startTime, endTime); - trackBuffer.samples.removeSample(dependentIt->second.get()); + auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first); + auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first); + auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter); + dependentSamples.insert(firstDecodeIter, nextSyncIter); + + PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample"); + + // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly + // not yet displayed samples. 
+ MediaTime currentMediaTime = m_source->currentTime(); + if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) { + PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime); + possiblyEnqueuedRanges.intersectWith(erasedRanges); + if (possiblyEnqueuedRanges.length()) + trackBuffer.needsReenqueueing = true; } - erasedRanges->invert(); - m_buffered->intersectWith(erasedRanges.get()); + erasedRanges.invert(); + trackBuffer.buffered.intersectWith(erasedRanges); + setBufferedDirty(true); } // 1.17 If spliced audio frame is set: @@ -979,7 +1593,11 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas // Otherwise: // Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer. trackBuffer.samples.addSample(sample); - trackBuffer.decodeQueue.insert(SampleMap::MapType::value_type(decodeTimestamp, sample)); + + if (trackBuffer.lastEnqueuedDecodeEndTime.isInvalid() || decodeTimestamp >= trackBuffer.lastEnqueuedDecodeEndTime) { + DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp); + trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample)); + } // 1.18 Set last decode timestamp for track buffer to decode timestamp. trackBuffer.lastDecodeTimestamp = decodeTimestamp; @@ -993,12 +1611,29 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp) trackBuffer.highestPresentationTimestamp = frameEndTimestamp; - // 1.21 If highest presentation end timestamp is unset or frame end timestamp is greater than highest - // presentation end timestamp, then set highest presentation end timestamp equal to frame end timestamp. - if (m_highestPresentationEndTimestamp.isInvalid() || frameEndTimestamp > m_highestPresentationEndTimestamp) - m_highestPresentationEndTimestamp = frameEndTimestamp; + // 1.21 If frame end timestamp is greater than group end timestamp, then set group end timestamp equal + // to frame end timestamp. + if (m_groupEndTimestamp.isInvalid() || frameEndTimestamp > m_groupEndTimestamp) + m_groupEndTimestamp = frameEndTimestamp; + + // 1.22 If generate timestamps flag equals true, then set timestampOffset equal to frame end timestamp. + if (m_shouldGenerateTimestamps) + m_timestampOffset = frameEndTimestamp; - m_buffered->add(presentationTimestamp.toDouble(), (presentationTimestamp + frameDuration + microsecond).toDouble()); + // Eliminate small gaps between buffered ranges by coalescing + // disjoint ranges separated by less than a "fudge factor". 
+ auto presentationEndTime = presentationTimestamp + frameDuration; + auto nearestToPresentationStartTime = trackBuffer.buffered.nearest(presentationTimestamp); + if (nearestToPresentationStartTime.isValid() && (presentationTimestamp - nearestToPresentationStartTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor())) + presentationTimestamp = nearestToPresentationStartTime; + + auto nearestToPresentationEndTime = trackBuffer.buffered.nearest(presentationEndTime); + if (nearestToPresentationEndTime.isValid() && (nearestToPresentationEndTime - presentationEndTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor())) + presentationEndTime = nearestToPresentationEndTime; + + trackBuffer.buffered.add(presentationTimestamp, presentationEndTime); + m_bufferedSinceLastMonitor += frameDuration.toDouble(); + setBufferedDirty(true); break; } while (1); @@ -1006,35 +1641,45 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas // Steps 2-4 will be handled by MediaSource::monitorSourceBuffers() // 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new - // duration set to the maximum of the current duration and the highest end timestamp reported by HTMLMediaElement.buffered. - if (highestPresentationEndTimestamp().toDouble() > m_source->duration()) - m_source->setDuration(highestPresentationEndTimestamp().toDouble(), IgnorableExceptionCode()); + // duration set to the maximum of the current duration and the group end timestamp. + if (m_groupEndTimestamp > m_source->duration()) + m_source->setDurationInternal(m_groupEndTimestamp); } -bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const +bool SourceBuffer::hasAudio() const { return m_audioTracks && m_audioTracks->length(); } -bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const +bool SourceBuffer::hasVideo() const { return m_videoTracks && m_videoTracks->length(); } -void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track) +bool SourceBuffer::sourceBufferPrivateHasAudio() const +{ + return hasAudio(); +} + +bool SourceBuffer::sourceBufferPrivateHasVideo() const +{ + return hasVideo(); +} + +void SourceBuffer::videoTrackSelectedChanged(VideoTrack& track) { // 2.4.5 Changes to selected/enabled track state // If the selected video track changes, then run the following steps: // 1. If the SourceBuffer associated with the previously selected video track is not associated with // any other enabled tracks, run the following steps: - if (track->selected() + if (!track.selected() && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled()) && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled()) && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) { // 1.1 Remove the SourceBuffer from activeSourceBuffers. // 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers setActive(false); - } else if (!track->selected()) { + } else if (track.selected()) { // 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers, // run the following steps: // 2.1 Add the SourceBuffer to activeSourceBuffers. 
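The fudge-factor coalescing earlier in this hunk deserves a standalone illustration: without it, back-to-back media segments whose ranges differ by a few microseconds would fragment buffered into many tiny ranges. A sketch over sorted [start, end) intervals in plain C++; addCoalesced is a hypothetical stand-in for PlatformTimeRanges::add combined with the nearest() snapping above, not WebKit API:

    #include <algorithm>
    #include <utility>
    #include <vector>

    struct Range { double start; double end; };

    // Insert a new range, merging with any existing range that overlaps it or sits
    // within `fudge` seconds of it, so sub-frame seams don't fragment the list.
    static void addCoalesced(std::vector<Range>& ranges, Range incoming, double fudge)
    {
        std::vector<Range> result;
        for (const auto& existing : ranges) {
            bool nearOrOverlapping = existing.start <= incoming.end + fudge
                && incoming.start <= existing.end + fudge;
            if (nearOrOverlapping) {
                // Absorb the neighbor; `incoming` only grows, so one pass suffices.
                incoming.start = std::min(incoming.start, existing.start);
                incoming.end = std::max(incoming.end, existing.end);
            } else
                result.push_back(existing);
        }
        result.push_back(incoming);
        std::sort(result.begin(), result.end(),
            [](const Range& a, const Range& b) { return a.start < b.start; });
        ranges = std::move(result);
    }

With fudge = 0 this still merges ranges that abut exactly; the patch passes MediaSource::currentTimeFudgeFactor() so ranges separated by sub-frame gaps merge as well.
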
@@ -1042,23 +1687,26 @@ void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track) setActive(true); } + if (m_videoTracks && m_videoTracks->contains(track)) + m_videoTracks->scheduleChangeEvent(); + if (!isRemoved()) m_source->mediaElement()->videoTrackSelectedChanged(track); } -void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track) +void SourceBuffer::audioTrackEnabledChanged(AudioTrack& track) { // 2.4.5 Changes to selected/enabled track state // If an audio track becomes disabled and the SourceBuffer associated with this track is not // associated with any other enabled or selected track, then run the following steps: - if (track->enabled() + if (!track.enabled() && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled()) && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled()) && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) { // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers setActive(false); - } else if (!track->enabled()) { + } else if (track.enabled()) { // If an audio track becomes enabled and the SourceBuffer associated with this track is // not already in activeSourceBuffers, then run the following steps: // 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers @@ -1066,16 +1714,19 @@ void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track) setActive(true); } + if (m_audioTracks && m_audioTracks->contains(track)) + m_audioTracks->scheduleChangeEvent(); + if (!isRemoved()) m_source->mediaElement()->audioTrackEnabledChanged(track); } -void SourceBuffer::textTrackModeChanged(TextTrack* track) +void SourceBuffer::textTrackModeChanged(TextTrack& track) { // 2.4.5 Changes to selected/enabled track state // If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not // associated with any other enabled or selected track, then run the following steps: - if (track->mode() == TextTrack::disabledKeyword() + if (track.mode() == TextTrack::Mode::Disabled && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled()) && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled()) && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) { @@ -1090,82 +1741,324 @@ void SourceBuffer::textTrackModeChanged(TextTrack* track) setActive(true); } + if (m_textTracks && m_textTracks->contains(track)) + m_textTracks->scheduleChangeEvent(); + if (!isRemoved()) m_source->mediaElement()->textTrackModeChanged(track); } -void SourceBuffer::textTrackAddCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue) +void SourceBuffer::textTrackAddCue(TextTrack& track, TextTrackCue& cue) { if (!isRemoved()) m_source->mediaElement()->textTrackAddCue(track, cue); } -void SourceBuffer::textTrackAddCues(TextTrack* track, TextTrackCueList const* cueList) +void SourceBuffer::textTrackAddCues(TextTrack& track, const TextTrackCueList& cueList) { if (!isRemoved()) m_source->mediaElement()->textTrackAddCues(track, cueList); } -void SourceBuffer::textTrackRemoveCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue) +void SourceBuffer::textTrackRemoveCue(TextTrack& track, TextTrackCue& cue) { if (!isRemoved()) m_source->mediaElement()->textTrackRemoveCue(track, cue); } -void SourceBuffer::textTrackRemoveCues(TextTrack* track, TextTrackCueList const* cueList) +void SourceBuffer::textTrackRemoveCues(TextTrack& track, const TextTrackCueList& cueList) { if (!isRemoved()) m_source->mediaElement()->textTrackRemoveCues(track, 
cueList); } -void SourceBuffer::textTrackKindChanged(TextTrack* track) +void SourceBuffer::textTrackKindChanged(TextTrack& track) { if (!isRemoved()) m_source->mediaElement()->textTrackKindChanged(track); } -void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID) +void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(const AtomicString& trackID) { - LOG(Media, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this); + LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this); auto it = m_trackBufferMap.find(trackID); if (it == m_trackBufferMap.end()) return; - provideMediaData(it->value, trackID); + auto& trackBuffer = it->value; + if (!trackBuffer.needsReenqueueing && !m_source->isSeeking()) + provideMediaData(trackBuffer, trackID); } -void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID) +void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, const AtomicString& trackID) { + if (m_source->isSeeking()) + return; + #if !LOG_DISABLED unsigned enqueuedSamples = 0; #endif - auto sampleIt = trackBuffer.decodeQueue.begin(); - for (auto sampleEnd = trackBuffer.decodeQueue.end(); sampleIt != sampleEnd; ++sampleIt) { + while (!trackBuffer.decodeQueue.empty()) { if (!m_private->isReadyForMoreSamples(trackID)) { m_private->notifyClientWhenReadyForMoreSamples(trackID); break; } - RefPtr<MediaSample> sample = sampleIt->second; + // FIXME(rdar://problem/20635969): Remove this re-entrancy protection when the aforementioned radar is resolved; protecting + // against re-entrancy introduces a small inefficiency when removing appended samples from the decode queue one at a time + // rather than when all samples have been enqueued. + auto sample = trackBuffer.decodeQueue.begin()->second; + trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin()); + + // Do not enqueue samples spanning a significant unbuffered gap. + // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run + // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between + // enqueued samples allows for situations where we overrun the end of a buffered range + // but don't notice for 350ms of playback time, and the client can enqueue data for the + // new current time without triggering this early return. + // FIXME(135867): Make this gap detection logic less arbitrary. + MediaTime oneSecond(1, 1); + if (trackBuffer.lastEnqueuedDecodeEndTime.isValid() && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeEndTime > oneSecond) + break; + trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime(); - m_private->enqueueSample(sample.release(), trackID); + trackBuffer.lastEnqueuedDecodeEndTime = sample->decodeTime() + sample->duration(); + m_private->enqueueSample(sample.releaseNonNull(), trackID); #if !LOG_DISABLED ++enqueuedSamples; #endif + } + + LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples); +} + +void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, const AtomicString& trackID, const MediaTime& time) +{ + m_private->flush(trackID); + trackBuffer.decodeQueue.clear(); + // Find the sample which contains the current presentation time.
+ auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time); + + if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()) + currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(time); + + if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end() + || (currentSamplePTSIterator->first - time) > MediaSource::currentTimeFudgeFactor()) + return; + + // Search backward for the previous sync sample. + DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime()); + auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey); + ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end()); + + auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator); + auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter); + if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend()) + return; + + // Fill the decode queue with the non-displaying samples. + for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter) { + auto copy = iter->second->createNonDisplayingCopy(); + DecodeOrderSampleMap::KeyType decodeKey(copy->decodeTime(), copy->presentationTime()); + trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, WTFMove(copy))); + } + + if (!trackBuffer.decodeQueue.empty()) { + auto& lastSample = trackBuffer.decodeQueue.rbegin()->second; + trackBuffer.lastEnqueuedPresentationTime = lastSample->presentationTime(); + trackBuffer.lastEnqueuedDecodeEndTime = lastSample->decodeTime(); + } else { + trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime(); + trackBuffer.lastEnqueuedDecodeEndTime = MediaTime::invalidTime(); } - trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin(), sampleIt); - LOG(Media, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples); + // Fill the decode queue with the remaining samples. + for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter) + trackBuffer.decodeQueue.insert(*iter); + provideMediaData(trackBuffer, trackID); + + trackBuffer.needsReenqueueing = false; } + void SourceBuffer::didDropSample() { if (!isRemoved()) m_source->mediaElement()->incrementDroppedFrameCount(); } +void SourceBuffer::monitorBufferingRate() +{ + double now = monotonicallyIncreasingTime(); + double interval = now - m_timeOfBufferingMonitor; + double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval; + + m_timeOfBufferingMonitor = now; + m_bufferedSinceLastMonitor = 0; + + m_averageBufferRate += (interval * ExponentialMovingAverageCoefficient) * (rateSinceLastMonitor - m_averageBufferRate); + + LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_averageBufferRate: %lf", this, m_averageBufferRate); +} + +void SourceBuffer::updateBufferedFromTrackBuffers() +{ + // 3.1 Attributes, buffered + // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-buffered + + // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object.
+ MediaTime highestEndTime = MediaTime::negativeInfiniteTime(); + for (auto& trackBuffer : m_trackBufferMap.values()) { + if (!trackBuffer.buffered.length()) + continue; + highestEndTime = std::max(highestEndTime, trackBuffer.buffered.maximumBufferedTime()); + } + + // NOTE: Short circuit the following if none of the TrackBuffers have buffered ranges to avoid generating + // a single range of {0, 0}. + if (highestEndTime.isNegativeInfinite()) { + m_buffered->ranges() = PlatformTimeRanges(); + return; + } + + // 3. Let intersection ranges equal a TimeRange object containing a single range from 0 to highest end time. + PlatformTimeRanges intersectionRanges { MediaTime::zeroTime(), highestEndTime }; + + // 4. For each audio and video track buffer managed by this SourceBuffer, run the following steps: + for (auto& trackBuffer : m_trackBufferMap.values()) { + // 4.1 Let track ranges equal the track buffer ranges for the current track buffer. + PlatformTimeRanges trackRanges = trackBuffer.buffered; + // 4.2 If readyState is "ended", then set the end time on the last range in track ranges to highest end time. + if (m_source->isEnded()) + trackRanges.add(trackRanges.maximumBufferedTime(), highestEndTime); + + // 4.3 Let new intersection ranges equal the intersection between the intersection ranges and the track ranges. + // 4.4 Replace the ranges in intersection ranges with the new intersection ranges. + intersectionRanges.intersectWith(trackRanges); + } + + // 5. If intersection ranges does not contain the exact same range information as the current value of this attribute, + // then update the current value of this attribute to intersection ranges. + m_buffered->ranges() = intersectionRanges; + setBufferedDirty(true); +} + +bool SourceBuffer::canPlayThroughRange(PlatformTimeRanges& ranges) +{ + if (isRemoved()) + return false; + + monitorBufferingRate(); + + // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater + // means indefinite playback. This could be improved by taking jitter into account. + if (m_averageBufferRate > 1) + return true; + + // Add up all the time yet to be buffered. + MediaTime currentTime = m_source->currentTime(); + MediaTime duration = m_source->duration(); + + PlatformTimeRanges unbufferedRanges = ranges; + unbufferedRanges.invert(); + unbufferedRanges.intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration))); + MediaTime unbufferedTime = unbufferedRanges.totalDuration(); + if (!unbufferedTime.isValid()) + return true; + + MediaTime timeRemaining = duration - currentTime; + return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble(); +} + +size_t SourceBuffer::extraMemoryCost() const +{ + size_t extraMemoryCost = m_pendingAppendData.capacity(); + for (auto& trackBuffer : m_trackBufferMap.values()) + extraMemoryCost += trackBuffer.samples.sizeInBytes(); + + return extraMemoryCost; +} + +void SourceBuffer::reportExtraMemoryAllocated() +{ + size_t extraMemoryCost = this->extraMemoryCost(); + if (extraMemoryCost <= m_reportedExtraMemoryCost) + return; + + size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost; + m_reportedExtraMemoryCost = extraMemoryCost; + + JSC::JSLockHolder lock(scriptExecutionContext()->vm()); + // FIXME: Adopt reportExtraMemoryVisited, and switch to reportExtraMemoryAllocated. 
+ // https://bugs.webkit.org/show_bug.cgi?id=142595 + scriptExecutionContext()->vm().heap.deprecatedReportExtraMemory(extraMemoryCostDelta); +} + +Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID) +{ + auto it = m_trackBufferMap.find(trackID); + if (it == m_trackBufferMap.end()) + return Vector<String>(); + + TrackBuffer& trackBuffer = it->value; + Vector<String> sampleDescriptions; + for (auto& pair : trackBuffer.samples.decodeOrder()) + sampleDescriptions.append(toString(*pair.second)); + + return sampleDescriptions; +} + +Vector<String> SourceBuffer::enqueuedSamplesForTrackID(const AtomicString& trackID) +{ + return m_private->enqueuedSamplesForTrackID(trackID); +} + +Document& SourceBuffer::document() const +{ + ASSERT(scriptExecutionContext()); + return downcast<Document>(*scriptExecutionContext()); +} + +ExceptionOr<void> SourceBuffer::setMode(AppendMode newMode) +{ + // 3.1 Attributes - mode + // http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode + + // On setting, run the following steps: + + // 1. Let new mode equal the new value being assigned to this attribute. + // 2. If generate timestamps flag equals true and new mode equals "segments", then throw an INVALID_ACCESS_ERR exception and abort these steps. + if (m_shouldGenerateTimestamps && newMode == AppendMode::Segments) + return Exception { INVALID_ACCESS_ERR }; + + // 3. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an INVALID_STATE_ERR exception and abort these steps. + // 4. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps. + if (isRemoved() || m_updating) + return Exception { INVALID_STATE_ERR }; + + // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps: + if (m_source->isEnded()) { + // 5.1. Set the readyState attribute of the parent media source to "open" + // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source. + m_source->openIfInEndedState(); + } + + // 6. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps. + if (m_appendState == ParsingMediaSegment) + return Exception { INVALID_STATE_ERR }; + + // 7. If the new mode equals "sequence", then set the group start timestamp to the group end timestamp. + if (newMode == AppendMode::Sequence) + m_groupStartTimestamp = m_groupEndTimestamp; + + // 8. Update the attribute to new mode. + m_mode = newMode; + + return { }; +} + } // namespace WebCore #endif
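The buffering heuristics in this patch reduce to two small pieces of arithmetic: monitorBufferingRate() smooths the observed append rate with an exponential moving average whose weight grows with the sampling interval (ExponentialMovingAverageCoefficient = 0.1), and canPlayThroughRange() then asks whether the unbuffered remainder can be fetched before the playhead reaches it. A self-contained sketch of both under the same steady-rate assumption (illustrative names, not WebKit API):

    // Illustrative exponential-moving-average tracker mirroring monitorBufferingRate():
    // longer gaps between samples give the new observation proportionally more weight.
    class BufferingRateMonitor {
    public:
        // secondsBuffered: media time appended since the last sample; now: monotonic clock, in seconds.
        void sample(double secondsBuffered, double now)
        {
            double interval = now - m_lastSampleTime;
            m_lastSampleTime = now;
            if (interval <= 0)
                return;
            double instantaneousRate = secondsBuffered / interval;
            m_averageRate += interval * 0.1 * (instantaneousRate - m_averageRate);
        }
        double averageRate() const { return m_averageRate; }

    private:
        double m_lastSampleTime { 0 };
        double m_averageRate { 0 };
    };

    // True if the remaining unbuffered media can be loaded, at the observed average
    // buffering rate (seconds of media per second of wall clock), before playback
    // catches up to it; mirrors the inequality at the end of canPlayThroughRange().
    static bool canPlayThrough(double unbufferedSeconds, double averageBufferRate, double currentTime, double duration)
    {
        if (averageBufferRate >= 1) // buffering at real time or faster never stalls
            return true;
        if (averageBufferRate <= 0)
            return false;
        return unbufferedSeconds / averageBufferRate < duration - currentTime;
    }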