Diffstat (limited to 'Source/WebCore/Modules/mediasource')
-rw-r--r--  Source/WebCore/Modules/mediasource/AudioTrackMediaSource.h     41
-rw-r--r--  Source/WebCore/Modules/mediasource/AudioTrackMediaSource.idl    4
-rw-r--r--  Source/WebCore/Modules/mediasource/DOMURLMediaSource.cpp        5
-rw-r--r--  Source/WebCore/Modules/mediasource/DOMURLMediaSource.h          9
-rw-r--r--  Source/WebCore/Modules/mediasource/DOMURLMediaSource.idl        2
-rw-r--r--  Source/WebCore/Modules/mediasource/MediaSource.cpp            786
-rw-r--r--  Source/WebCore/Modules/mediasource/MediaSource.h              134
-rw-r--r--  Source/WebCore/Modules/mediasource/MediaSource.idl             27
-rw-r--r--  Source/WebCore/Modules/mediasource/MediaSourceRegistry.cpp     14
-rw-r--r--  Source/WebCore/Modules/mediasource/MediaSourceRegistry.h       15
-rw-r--r--  Source/WebCore/Modules/mediasource/SampleMap.cpp              219
-rw-r--r--  Source/WebCore/Modules/mediasource/SampleMap.h                120
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBuffer.cpp          1651
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBuffer.h             198
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBuffer.idl            41
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBufferList.cpp        31
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBufferList.h          30
-rw-r--r--  Source/WebCore/Modules/mediasource/SourceBufferList.idl         7
-rw-r--r--  Source/WebCore/Modules/mediasource/TextTrackMediaSource.h      43
-rw-r--r--  Source/WebCore/Modules/mediasource/TextTrackMediaSource.idl     4
-rw-r--r--  Source/WebCore/Modules/mediasource/VideoPlaybackQuality.cpp     4
-rw-r--r--  Source/WebCore/Modules/mediasource/VideoPlaybackQuality.h       9
-rw-r--r--  Source/WebCore/Modules/mediasource/VideoPlaybackQuality.idl     4
-rw-r--r--  Source/WebCore/Modules/mediasource/VideoTrackMediaSource.h     41
-rw-r--r--  Source/WebCore/Modules/mediasource/VideoTrackMediaSource.idl    4
25 files changed, 2503 insertions, 940 deletions
diff --git a/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.h b/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.h
new file mode 100644
index 000000000..786c71722
--- /dev/null
+++ b/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
+
+#include "AudioTrack.h"
+
+namespace WebCore {
+
+class AudioTrackMediaSource {
+public:
+ static SourceBuffer* sourceBuffer(AudioTrack& track) { return track.sourceBuffer(); }
+};
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
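The new header is the standard WebKit shape for a partial-interface helper: a stateless class of static functions that take the host object, giving the bindings generated from AudioTrackMediaSource.idl somewhere to dispatch the added attribute. A minimal self-contained sketch of that pattern, with stand-in types (AudioTrackHelper and friends are illustrative, not WebKit's):

#include <iostream>

struct SourceBuffer { int id = 7; };

struct AudioTrack {
    SourceBuffer* buffer = nullptr;
    SourceBuffer* sourceBuffer() const { return buffer; }
};

// Mirrors AudioTrackMediaSource: no state, just static accessors that
// bindings code can call with the wrapped AudioTrack.
struct AudioTrackHelper {
    static SourceBuffer* sourceBuffer(AudioTrack& track) { return track.sourceBuffer(); }
};

int main()
{
    SourceBuffer buffer;
    AudioTrack track { &buffer };
    std::cout << AudioTrackHelper::sourceBuffer(track)->id << '\n'; // prints 7
}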
diff --git a/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.idl b/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.idl
index f91cb4a04..714241391 100644
--- a/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.idl
+++ b/Source/WebCore/Modules/mediasource/AudioTrackMediaSource.idl
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.cpp b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.cpp
index 205127484..513f9fb9a 100644
--- a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.cpp
+++ b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.cpp
@@ -39,13 +39,10 @@
namespace WebCore {
-String DOMURLMediaSource::createObjectURL(ScriptExecutionContext* scriptExecutionContext, MediaSource* source)
+String DOMURLMediaSource::createObjectURL(ScriptExecutionContext& scriptExecutionContext, MediaSource& source)
{
// Since WebWorkers cannot obtain MediaSource objects, we should be on the main thread.
ASSERT(isMainThread());
-
- if (!scriptExecutionContext || !source)
- return String();
return DOMURL::createPublicURL(scriptExecutionContext, source);
}
diff --git a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.h b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.h
index 1eccd4b75..4a3e8c4d3 100644
--- a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.h
+++ b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.h
@@ -28,8 +28,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DOMURLMediaSource_h
-#define DOMURLMediaSource_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
@@ -42,11 +41,9 @@ class ScriptExecutionContext;
class DOMURLMediaSource {
public:
- static String createObjectURL(ScriptExecutionContext*, MediaSource*);
+ static String createObjectURL(ScriptExecutionContext&, MediaSource&);
};
} // namespace WebCore
-#endif
-
-#endif
+#endif // ENABLE(MEDIA_SOURCE)
diff --git a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.idl b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.idl
index d154397eb..a8e78decd 100644
--- a/Source/WebCore/Modules/mediasource/DOMURLMediaSource.idl
+++ b/Source/WebCore/Modules/mediasource/DOMURLMediaSource.idl
@@ -31,5 +31,5 @@
Conditional=MEDIA_SOURCE
]
partial interface DOMURL {
- [CallWith=ScriptExecutionContext,TreatReturnedNullStringAs=Null] static DOMString createObjectURL(MediaSource? source);
+ [CallWith=ScriptExecutionContext] static DOMString createObjectURL(MediaSource source);
};
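Dropping the nullable MediaSource? and TreatReturnedNullStringAs=Null moves null handling out of WebCore: the bindings now reject null before the call, so the implementation can take references, as the DOMURLMediaSource.cpp hunk above shows. A sketch of the before/after calling convention, using stand-in types rather than the generated bindings:

#include <cassert>
#include <string>

struct ScriptExecutionContext {};
struct MediaSource {};

// Before: pointer parameters, so a null guard and a null-string failure path.
std::string createObjectURLOld(ScriptExecutionContext* context, MediaSource* source)
{
    if (!context || !source)
        return {};
    return "blob:example";
}

// After: non-null is a precondition carried by the reference types.
std::string createObjectURLNew(ScriptExecutionContext&, MediaSource&)
{
    return "blob:example";
}

int main()
{
    ScriptExecutionContext context;
    MediaSource source;
    assert(createObjectURLNew(context, source) == createObjectURLOld(&context, &source));
}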
diff --git a/Source/WebCore/Modules/mediasource/MediaSource.cpp b/Source/WebCore/Modules/mediasource/MediaSource.cpp
index 7a3294676..4f188b852 100644
--- a/Source/WebCore/Modules/mediasource/MediaSource.cpp
+++ b/Source/WebCore/Modules/mediasource/MediaSource.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -33,79 +34,103 @@
#if ENABLE(MEDIA_SOURCE)
-#include "AudioTrack.h"
#include "AudioTrackList.h"
#include "ContentType.h"
#include "Event.h"
+#include "EventNames.h"
#include "ExceptionCode.h"
-#include "ExceptionCodePlaceholder.h"
-#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "Logging.h"
-#include "MIMETypeRegistry.h"
-#include "MediaError.h"
-#include "MediaPlayer.h"
+#include "MediaSourcePrivate.h"
#include "MediaSourceRegistry.h"
+#include "SourceBuffer.h"
+#include "SourceBufferList.h"
#include "SourceBufferPrivate.h"
-#include "TextTrack.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
-#include "VideoTrack.h"
#include "VideoTrackList.h"
-#include <runtime/Uint8Array.h>
-#include <wtf/text/CString.h>
-#include <wtf/text/WTFString.h>
namespace WebCore {
-PassRefPtr<MediaSource> MediaSource::create(ScriptExecutionContext& context)
+URLRegistry* MediaSource::s_registry;
+
+void MediaSource::setRegistry(URLRegistry* registry)
+{
+ ASSERT(!s_registry);
+ s_registry = registry;
+}
+
+Ref<MediaSource> MediaSource::create(ScriptExecutionContext& context)
{
- RefPtr<MediaSource> mediaSource(adoptRef(new MediaSource(context)));
+ auto mediaSource = adoptRef(*new MediaSource(context));
mediaSource->suspendIfNeeded();
- return mediaSource.release();
+ return mediaSource;
}
MediaSource::MediaSource(ScriptExecutionContext& context)
: ActiveDOMObject(&context)
- , m_mediaElement(0)
+ , m_duration(MediaTime::invalidTime())
+ , m_pendingSeekTime(MediaTime::invalidTime())
, m_readyState(closedKeyword())
, m_asyncEventQueue(*this)
{
- LOG(Media, "MediaSource::MediaSource %p", this);
+ LOG(MediaSource, "MediaSource::MediaSource %p", this);
m_sourceBuffers = SourceBufferList::create(scriptExecutionContext());
m_activeSourceBuffers = SourceBufferList::create(scriptExecutionContext());
}
MediaSource::~MediaSource()
{
- LOG(Media, "MediaSource::~MediaSource %p", this);
+ LOG(MediaSource, "MediaSource::~MediaSource %p", this);
ASSERT(isClosed());
}
const AtomicString& MediaSource::openKeyword()
{
- DEFINE_STATIC_LOCAL(const AtomicString, open, ("open", AtomicString::ConstructFromLiteral));
+ static NeverDestroyed<const AtomicString> open("open", AtomicString::ConstructFromLiteral);
return open;
}
const AtomicString& MediaSource::closedKeyword()
{
- DEFINE_STATIC_LOCAL(const AtomicString, closed, ("closed", AtomicString::ConstructFromLiteral));
+ static NeverDestroyed<const AtomicString> closed("closed", AtomicString::ConstructFromLiteral);
return closed;
}
const AtomicString& MediaSource::endedKeyword()
{
- DEFINE_STATIC_LOCAL(const AtomicString, ended, ("ended", AtomicString::ConstructFromLiteral));
+ static NeverDestroyed<const AtomicString> ended("ended", AtomicString::ConstructFromLiteral);
return ended;
}
-void MediaSource::setPrivateAndOpen(PassRef<MediaSourcePrivate> mediaSourcePrivate)
+void MediaSource::setPrivateAndOpen(Ref<MediaSourcePrivate>&& mediaSourcePrivate)
{
ASSERT(!m_private);
ASSERT(m_mediaElement);
- m_private = std::move(mediaSourcePrivate);
+ m_private = WTFMove(mediaSourcePrivate);
+
+ // 2.4.1 Attaching to a media element
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#mediasource-attach
+
+ // ↳ If readyState is NOT set to "closed"
+ // Run the "If the media data cannot be fetched at all, due to network errors, causing the user agent to give up trying
+ // to fetch the resource" steps of the resource fetch algorithm's media data processing steps list.
+ if (!isClosed()) {
+ m_mediaElement->mediaLoadingFailedFatally(MediaPlayer::NetworkError);
+ return;
+ }
+
+ // ↳ Otherwise
+ // 1. Set the media element's delaying-the-load-event-flag to false.
+ m_mediaElement->setShouldDelayLoadEvent(false);
+
+ // 2. Set the readyState attribute to "open".
+ // 3. Queue a task to fire a simple event named sourceopen at the MediaSource.
setReadyState(openKeyword());
+
+ // 4. Continue the resource fetch algorithm by running the remaining "Otherwise (mode is local)" steps,
+ // with these clarifications:
+ // NOTE: This is handled in HTMLMediaElement.
}
void MediaSource::addedToRegistry()
@@ -118,110 +143,268 @@ void MediaSource::removedFromRegistry()
unsetPendingActivity(this);
}
-double MediaSource::duration() const
+MediaTime MediaSource::duration() const
{
- return isClosed() ? std::numeric_limits<float>::quiet_NaN() : m_private->duration();
+ return m_duration;
}
-PassRefPtr<TimeRanges> MediaSource::buffered() const
+void MediaSource::durationChanged(const MediaTime& duration)
{
+ m_duration = duration;
+}
+
+MediaTime MediaSource::currentTime() const
+{
+ return m_mediaElement ? m_mediaElement->currentMediaTime() : MediaTime::zeroTime();
+}
+
+std::unique_ptr<PlatformTimeRanges> MediaSource::buffered() const
+{
+ if (m_buffered && m_activeSourceBuffers->length() && std::all_of(m_activeSourceBuffers->begin(), m_activeSourceBuffers->end(), [](auto& buffer) { return !buffer->isBufferedDirty(); }))
+ return std::make_unique<PlatformTimeRanges>(*m_buffered);
+
+ m_buffered = std::make_unique<PlatformTimeRanges>();
+ for (auto& sourceBuffer : *m_activeSourceBuffers)
+ sourceBuffer->setBufferedDirty(false);
+
// Implements MediaSource algorithm for HTMLMediaElement.buffered.
// https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#htmlmediaelement-extensions
- Vector<RefPtr<TimeRanges>> ranges = activeRanges();
+ Vector<PlatformTimeRanges> activeRanges = this->activeRanges();
// 1. If activeSourceBuffers.length equals 0 then return an empty TimeRanges object and abort these steps.
- if (ranges.isEmpty())
- return TimeRanges::create();
+ if (activeRanges.isEmpty())
+ return std::make_unique<PlatformTimeRanges>(*m_buffered);
// 2. Let active ranges be the ranges returned by buffered for each SourceBuffer object in activeSourceBuffers.
// 3. Let highest end time be the largest range end time in the active ranges.
- double highestEndTime = -1;
- for (size_t i = 0; i < ranges.size(); ++i) {
- unsigned length = ranges[i]->length();
+ MediaTime highestEndTime = MediaTime::zeroTime();
+ for (auto& ranges : activeRanges) {
+ unsigned length = ranges.length();
if (length)
- highestEndTime = std::max(highestEndTime, ranges[i]->end(length - 1, ASSERT_NO_EXCEPTION));
+ highestEndTime = std::max(highestEndTime, ranges.end(length - 1));
}
// Return an empty range if all ranges are empty.
- if (highestEndTime < 0)
- return TimeRanges::create();
+ if (!highestEndTime)
+ return std::make_unique<PlatformTimeRanges>(*m_buffered);
// 4. Let intersection ranges equal a TimeRange object containing a single range from 0 to highest end time.
- RefPtr<TimeRanges> intersectionRanges = TimeRanges::create(0, highestEndTime);
+ m_buffered->add(MediaTime::zeroTime(), highestEndTime);
// 5. For each SourceBuffer object in activeSourceBuffers run the following steps:
bool ended = readyState() == endedKeyword();
- for (size_t i = 0; i < ranges.size(); ++i) {
+ for (auto& sourceRanges : activeRanges) {
// 5.1 Let source ranges equal the ranges returned by the buffered attribute on the current SourceBuffer.
- TimeRanges* sourceRanges = ranges[i].get();
-
// 5.2 If readyState is "ended", then set the end time on the last range in source ranges to highest end time.
- if (ended && sourceRanges->length())
- sourceRanges->add(sourceRanges->start(sourceRanges->length() - 1, ASSERT_NO_EXCEPTION), highestEndTime);
+ if (ended && sourceRanges.length())
+ sourceRanges.add(sourceRanges.start(sourceRanges.length() - 1), highestEndTime);
// 5.3 Let new intersection ranges equal the intersection between the intersection ranges and the source ranges.
// 5.4 Replace the ranges in intersection ranges with the new intersection ranges.
- intersectionRanges->intersectWith(sourceRanges);
+ m_buffered->intersectWith(sourceRanges);
}
- return intersectionRanges.release();
+ return std::make_unique<PlatformTimeRanges>(*m_buffered);
}
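The rewritten buffered() caches a PlatformTimeRanges and recomputes it with the spec's intersection algorithm whenever an active buffer is dirty. The same algorithm over plain [start, end) pairs, as a hedged standalone sketch (no caching, no MediaTime):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

using Range = std::pair<double, double>;
using Ranges = std::vector<Range>; // sorted, non-overlapping

static Ranges intersect(const Ranges& a, const Ranges& b)
{
    Ranges out;
    std::size_t i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
        double start = std::max(a[i].first, b[j].first);
        double end = std::min(a[i].second, b[j].second);
        if (start < end)
            out.push_back({ start, end });
        if (a[i].second < b[j].second)
            ++i;
        else
            ++j;
    }
    return out;
}

// Steps 1-5: clamp to the highest end time across active buffers, then
// intersect; "ended" extends each buffer's final range to that time.
static Ranges mediaSourceBuffered(std::vector<Ranges> active, bool ended)
{
    double highestEnd = 0;
    for (const auto& ranges : active) {
        if (!ranges.empty())
            highestEnd = std::max(highestEnd, ranges.back().second);
    }
    if (active.empty() || !highestEnd)
        return {};
    Ranges result { { 0, highestEnd } };
    for (auto& ranges : active) {
        if (ended && !ranges.empty())
            ranges.back().second = highestEnd;
        result = intersect(result, ranges);
    }
    return result;
}

int main()
{
    auto ranges = mediaSourceBuffered({ { { 0, 5 }, { 6, 10 } }, { { 0, 8 } } }, false);
    for (auto& range : ranges)
        std::printf("[%g, %g) ", range.first, range.second); // [0, 5) [6, 8)
}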
-class SourceBufferBufferedDoesNotContainTime {
-public:
- SourceBufferBufferedDoesNotContainTime(double time) : m_time(time) { }
- bool operator()(RefPtr<SourceBuffer> sourceBuffer)
- {
- return !sourceBuffer->buffered()->contain(m_time);
- }
+void MediaSource::seekToTime(const MediaTime& time)
+{
+ if (isClosed())
+ return;
- double m_time;
-};
+ // 2.4.3 Seeking
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#mediasource-seeking
-class SourceBufferBufferedHasEnough {
-public:
- SourceBufferBufferedHasEnough(double time, double duration) : m_time(time), m_duration(duration) { }
- bool operator()(RefPtr<SourceBuffer> sourceBuffer)
- {
- size_t rangePos = sourceBuffer->buffered()->find(m_time);
- if (rangePos == notFound)
- return false;
+ m_pendingSeekTime = time;
- double endTime = sourceBuffer->buffered()->end(rangePos, IGNORE_EXCEPTION);
- return m_duration - endTime < 1;
+ // Run the following steps as part of the "Wait until the user agent has established whether or not the
+ // media data for the new playback position is available, and, if it is, until it has decoded enough data
+ // to play back that position" step of the seek algorithm:
+ // ↳ If new playback position is not in any TimeRange of HTMLMediaElement.buffered
+ if (!hasBufferedTime(time)) {
+ // 1. If the HTMLMediaElement.readyState attribute is greater than HAVE_METADATA,
+ // then set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
+ m_private->setReadyState(MediaPlayer::HaveMetadata);
+
+ // 2. The media element waits until an appendBuffer() or an appendStream() call causes the coded
+ // frame processing algorithm to set the HTMLMediaElement.readyState attribute to a value greater
+ // than HAVE_METADATA.
+ LOG(MediaSource, "MediaSource::seekToTime(%p) - waitForSeekCompleted()", this);
+ m_private->waitForSeekCompleted();
+ return;
}
+ // ↳ Otherwise
+ // Continue
+
+ completeSeek();
+}
+
+void MediaSource::completeSeek()
+{
+ if (isClosed())
+ return;
+
+ // 2.4.3 Seeking, ctd.
+ // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#mediasource-seeking
+
+ ASSERT(m_pendingSeekTime.isValid());
+
+ // 2. The media element resets all decoders and initializes each one with data from the appropriate
+ // initialization segment.
+ // 3. The media element feeds coded frames from the active track buffers into the decoders starting
+ // with the closest random access point before the new playback position.
+ MediaTime pendingSeekTime = m_pendingSeekTime;
+ m_pendingSeekTime = MediaTime::invalidTime();
+ for (auto& sourceBuffer : *m_activeSourceBuffers)
+ sourceBuffer->seekToTime(pendingSeekTime);
+
+ // 4. Resume the seek algorithm at the "Await a stable state" step.
+ m_private->seekCompleted();
+
+ monitorSourceBuffers();
+}
+
+Ref<TimeRanges> MediaSource::seekable()
+{
+ // 6. HTMLMediaElement Extensions, seekable
+ // W3C Editor's Draft 16 September 2016
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#htmlmediaelement-extensions
+
+ // ↳ If duration equals NaN:
+ // Return an empty TimeRanges object.
+ if (m_duration.isInvalid())
+ return TimeRanges::create();
- double m_time;
- double m_duration;
-};
-
-class SourceBufferBufferedHasFuture {
-public:
- SourceBufferBufferedHasFuture(double time) : m_time(time) { }
- bool operator()(RefPtr<SourceBuffer> sourceBuffer)
- {
- size_t rangePos = sourceBuffer->buffered()->find(m_time);
- if (rangePos == notFound)
- return false;
-
- double endTime = sourceBuffer->buffered()->end(rangePos, IGNORE_EXCEPTION);
- return endTime - m_time > 1;
+ // ↳ If duration equals positive Infinity:
+ if (m_duration.isPositiveInfinite()) {
+ auto buffered = this->buffered();
+ // If live seekable range is not empty:
+ if (m_liveSeekable && m_liveSeekable->length()) {
+ // Let union ranges be the union of live seekable range and the HTMLMediaElement.buffered attribute.
+ buffered->unionWith(*m_liveSeekable);
+ // Return a single range with a start time equal to the earliest start time in union ranges
+ // and an end time equal to the highest end time in union ranges and abort these steps.
+ buffered->add(buffered->start(0), buffered->maximumBufferedTime());
+ return TimeRanges::create(*buffered);
+ }
+
+ // If the HTMLMediaElement.buffered attribute returns an empty TimeRanges object, then return
+ // an empty TimeRanges object and abort these steps.
+ if (!buffered->length())
+ return TimeRanges::create();
+
+ // Return a single range with a start time of 0 and an end time equal to the highest end time
+ // reported by the HTMLMediaElement.buffered attribute.
+ return TimeRanges::create({MediaTime::zeroTime(), buffered->maximumBufferedTime()});
}
- double m_time;
-};
+ // ↳ Otherwise:
+ // Return a single range with a start time of 0 and an end time equal to duration.
+ return TimeRanges::create({MediaTime::zeroTime(), m_duration});
+}
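The three cases of seekable() reduce to a small decision over the duration, with NaN standing in below for MediaTime::invalidTime() and infinity for the live-stream case. A sketch over plain doubles, not the PlatformTimeRanges plumbing:

#include <algorithm>
#include <cmath>
#include <utility>
#include <vector>

using Range = std::pair<double, double>;
using Ranges = std::vector<Range>; // sorted, non-overlapping

static Ranges seekable(double duration, const Ranges& buffered, const Ranges& liveSeekable)
{
    if (std::isnan(duration))
        return {}; // no duration yet: empty TimeRanges
    if (std::isinf(duration)) {
        if (!liveSeekable.empty()) {
            // Collapse the union of live seekable and buffered into one range.
            double start = liveSeekable.front().first;
            double end = liveSeekable.back().second;
            if (!buffered.empty()) {
                start = std::min(start, buffered.front().first);
                end = std::max(end, buffered.back().second);
            }
            return { { start, end } };
        }
        if (buffered.empty())
            return {};
        return { { 0, buffered.back().second } };
    }
    return { { 0, duration } }; // finite duration: a single [0, duration] range
}

int main()
{
    auto live = seekable(INFINITY, { { 4, 9 } }, {});
    return live.size() == 1 && live[0] == Range { 0, 9 } ? 0 : 1;
}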
+
+ExceptionOr<void> MediaSource::setLiveSeekableRange(double start, double end)
+{
+ // W3C Editor's Draft 16 September 2016
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-mediasource-setliveseekablerange
+
+ // If the readyState attribute is not "open" then throw an InvalidStateError exception and abort these steps.
+ if (!isOpen())
+ return Exception { INVALID_STATE_ERR };
+
+ // If start is negative or greater than end, then throw a TypeError exception and abort these steps.
+ if (start < 0 || start > end)
+ return Exception { TypeError };
+
+ // Set live seekable range to be a new normalized TimeRanges object containing a single range
+ // whose start position is start and end position is end.
+ m_liveSeekable = std::make_unique<PlatformTimeRanges>(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end));
+
+ return { };
+}
+
+ExceptionOr<void> MediaSource::clearLiveSeekableRange()
+{
+ // W3C Editor's Draft 16 September 2016
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-mediasource-clearliveseekablerange
+
+ // If the readyState attribute is not "open" then throw an InvalidStateError exception and abort these steps.
+ if (!isOpen())
+ return Exception { INVALID_STATE_ERR };
+ m_liveSeekable = nullptr;
+ return { };
+}
+
+const MediaTime& MediaSource::currentTimeFudgeFactor()
+{
+ // Allow hasCurrentTime() to be off by as much as the length of two 24fps video frames
+ static NeverDestroyed<MediaTime> fudgeFactor(2002, 24000);
+ return fudgeFactor;
+}
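For the record, MediaTime(2002, 24000) is 2 × 1001/24000 s = 2002/24000 s ≈ 83.4 ms, i.e. two frame durations at 24000/1001 ≈ 23.976 fps; two exactly-24 fps frames would be 2000/24000 s, so the "24fps" in the comment is the usual shorthand for 23.976.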
+
+bool MediaSource::hasBufferedTime(const MediaTime& time)
+{
+ if (time > duration())
+ return false;
+
+ auto ranges = buffered();
+ if (!ranges->length())
+ return false;
+
+ return abs(ranges->nearest(time) - time) <= currentTimeFudgeFactor();
+}
+
+bool MediaSource::hasCurrentTime()
+{
+ return hasBufferedTime(currentTime());
+}
+
+bool MediaSource::hasFutureTime()
+{
+ MediaTime currentTime = this->currentTime();
+ MediaTime duration = this->duration();
+
+ if (currentTime >= duration)
+ return true;
+
+ auto ranges = buffered();
+ MediaTime nearest = ranges->nearest(currentTime);
+ if (abs(nearest - currentTime) > currentTimeFudgeFactor())
+ return false;
+
+ size_t found = ranges->find(nearest);
+ if (found == notFound)
+ return false;
+
+ MediaTime localEnd = ranges->end(found);
+ if (localEnd == duration)
+ return true;
+
+ return localEnd - currentTime > currentTimeFudgeFactor();
+}
void MediaSource::monitorSourceBuffers()
{
- double currentTime = mediaElement()->currentTime();
+ if (isClosed())
+ return;
// 2.4.4 SourceBuffer Monitoring
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#buffer-monitoring
- // ↳ If buffered for all objects in activeSourceBuffers do not contain TimeRanges for the current
- // playback position:
- auto begin = m_activeSourceBuffers->begin();
- auto end = m_activeSourceBuffers->end();
- if (std::all_of(begin, end, SourceBufferBufferedDoesNotContainTime(currentTime))) {
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#buffer-monitoring
+
+ // Note, the behavior if activeSourceBuffers is empty is undefined.
+ if (!m_activeSourceBuffers) {
+ m_private->setReadyState(MediaPlayer::HaveNothing);
+ return;
+ }
+
+ // ↳ If the HTMLMediaElement.readyState attribute equals HAVE_NOTHING:
+ if (mediaElement()->readyState() == HTMLMediaElement::HAVE_NOTHING) {
+ // 1. Abort these steps.
+ return;
+ }
+
+ // ↳ If HTMLMediaElement.buffered does not contain a TimeRange for the current playback position:
+ if (!hasCurrentTime()) {
// 1. Set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
// 2. If this is the first transition to HAVE_METADATA, then queue a task to fire a simple event
// named loadedmetadata at the media element.
@@ -231,33 +414,40 @@ void MediaSource::monitorSourceBuffers()
return;
}
- // ↳ If buffered for all objects in activeSourceBuffers contain TimeRanges that include the current
- // playback position and enough data to ensure uninterrupted playback:
- if (std::all_of(begin, end, SourceBufferBufferedHasEnough(currentTime, mediaElement()->duration()))) {
+ // ↳ If HTMLMediaElement.buffered contains a TimeRange that includes the current
+ // playback position and enough data to ensure uninterrupted playback:
+ auto ranges = buffered();
+ if (std::all_of(m_activeSourceBuffers->begin(), m_activeSourceBuffers->end(), [&](auto& sourceBuffer) {
+ return sourceBuffer->canPlayThroughRange(*ranges);
+ })) {
// 1. Set the HTMLMediaElement.readyState attribute to HAVE_ENOUGH_DATA.
// 2. Queue a task to fire a simple event named canplaythrough at the media element.
// 3. Playback may resume at this point if it was previously suspended by a transition to HAVE_CURRENT_DATA.
m_private->setReadyState(MediaPlayer::HaveEnoughData);
+ if (m_pendingSeekTime.isValid())
+ completeSeek();
+
// 4. Abort these steps.
return;
}
- // ↳ If buffered for at least one object in activeSourceBuffers contains a TimeRange that includes
- // the current playback position but not enough data to ensure uninterrupted playback:
- if (std::any_of(begin, end, SourceBufferBufferedHasFuture(currentTime))) {
+ // ↳ If HTMLMediaElement.buffered contains a TimeRange that includes the current playback
+ // position and some time beyond the current playback position, then run the following steps:
+ if (hasFutureTime()) {
// 1. Set the HTMLMediaElement.readyState attribute to HAVE_FUTURE_DATA.
// 2. If the previous value of HTMLMediaElement.readyState was less than HAVE_FUTURE_DATA, then queue a task to fire a simple event named canplay at the media element.
// 3. Playback may resume at this point if it was previously suspended by a transition to HAVE_CURRENT_DATA.
m_private->setReadyState(MediaPlayer::HaveFutureData);
+ if (m_pendingSeekTime.isValid())
+ completeSeek();
+
// 4. Abort these steps.
return;
}
- // ↳ If buffered for at least one object in activeSourceBuffers contains a TimeRange that ends
- // at the current playback position and does not have a range covering the time immediately
- // after the current position:
+ // ↳ If HTMLMediaElement.buffered contains a TimeRange that ends at the current playback position and does not have a range covering the time immediately after the current position:
// NOTE: Logically, !(all objects do not contain currentTime) == (some objects contain current time)
// 1. Set the HTMLMediaElement.readyState attribute to HAVE_CURRENT_DATA.
@@ -266,35 +456,84 @@ void MediaSource::monitorSourceBuffers()
// 3. Playback is suspended at this point since the media element doesn't have enough data to
// advance the media timeline.
m_private->setReadyState(MediaPlayer::HaveCurrentData);
-
+
+ if (m_pendingSeekTime.isValid())
+ completeSeek();
+
// 4. Abort these steps.
}
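Condensing the four branches above: monitorSourceBuffers maps three booleans onto a readyState, with each upward transition queueing its event at the media element (the pending-seek completion on every data-bearing branch is omitted here). A sketch with the inputs collapsed into a plain struct:

#include <cassert>

enum class ReadyState { HaveMetadata, HaveCurrentData, HaveFutureData, HaveEnoughData };

struct BufferState {
    bool hasCurrentTime;    // buffered contains the current playback position
    bool hasFutureTime;     // ...and some time beyond it
    bool canPlayThroughAll; // every active SourceBuffer can play through
};

static ReadyState monitor(const BufferState& state)
{
    if (!state.hasCurrentTime)
        return ReadyState::HaveMetadata;   // loadedmetadata on first transition
    if (state.canPlayThroughAll)
        return ReadyState::HaveEnoughData; // canplaythrough
    if (state.hasFutureTime)
        return ReadyState::HaveFutureData; // canplay
    return ReadyState::HaveCurrentData;    // playback suspends here
}

int main()
{
    assert(monitor({ true, true, false }) == ReadyState::HaveFutureData);
}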
-void MediaSource::setDuration(double duration, ExceptionCode& ec)
+ExceptionOr<void> MediaSource::setDuration(double duration)
{
- if (duration < 0.0 || std::isnan(duration)) {
- ec = INVALID_ACCESS_ERR;
- return;
- }
- if (!isOpen()) {
- ec = INVALID_STATE_ERR;
- return;
+ // 2.1 Attributes - Duration
+ // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#attributes
+
+ // On setting, run the following steps:
+ // 1. If the value being set is negative or NaN then throw an INVALID_ACCESS_ERR exception and abort these steps.
+ if (duration < 0.0 || std::isnan(duration))
+ return Exception { INVALID_ACCESS_ERR };
+
+ // 2. If the readyState attribute is not "open" then throw an INVALID_STATE_ERR exception and abort these steps.
+ if (!isOpen())
+ return Exception { INVALID_STATE_ERR };
+
+ // 3. If the updating attribute equals true on any SourceBuffer in sourceBuffers, then throw an INVALID_STATE_ERR
+ // exception and abort these steps.
+ for (auto& sourceBuffer : *m_sourceBuffers) {
+ if (sourceBuffer->updating())
+ return Exception { INVALID_STATE_ERR };
}
- m_private->setDuration(duration);
+
+ // 4. Run the duration change algorithm with new duration set to the value being assigned to this attribute.
+ return setDurationInternal(MediaTime::createWithDouble(duration));
}
+ExceptionOr<void> MediaSource::setDurationInternal(const MediaTime& duration)
+{
+ // 2.4.6 Duration Change
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#duration-change-algorithm
+
+ MediaTime newDuration = duration;
+
+ // 1. If the current value of duration is equal to new duration, then return.
+ if (newDuration == m_duration)
+ return { };
+
+ // 2. If new duration is less than the highest presentation timestamp of any buffered coded frames
+ // for all SourceBuffer objects in sourceBuffers, then throw an InvalidStateError exception and
+ // abort these steps.
+ // 3. Let highest end time be the largest track buffer ranges end time across all the track buffers
+ // across all SourceBuffer objects in sourceBuffers.
+ MediaTime highestPresentationTimestamp;
+ MediaTime highestEndTime;
+ for (auto& sourceBuffer : *m_sourceBuffers) {
+ highestPresentationTimestamp = std::max(highestPresentationTimestamp, sourceBuffer->highestPresentationTimestamp());
+ highestEndTime = std::max(highestEndTime, sourceBuffer->bufferedInternal().ranges().maximumBufferedTime());
+ }
+ if (highestPresentationTimestamp.isValid() && newDuration < highestPresentationTimestamp)
+ return Exception { INVALID_STATE_ERR };
+
+ // 4. If new duration is less than highest end time, then
+ // 4.1. Update new duration to equal highest end time.
+ if (highestEndTime.isValid() && newDuration < highestEndTime)
+ newDuration = highestEndTime;
+
+ // 5. Update duration to new duration.
+ m_duration = newDuration;
+
+ // 6. Update the media duration to new duration and run the HTMLMediaElement duration change algorithm.
+ LOG(MediaSource, "MediaSource::setDurationInternal(%p) - duration(%g)", this, duration.toDouble());
+ m_private->durationChanged();
+
+ return { };
+}
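Steps 2-4 of the duration change algorithm reduce to a clamp: reject durations below the highest buffered presentation timestamp, and silently raise anything below the highest buffered end time. A sketch of just that arithmetic (names are illustrative):

#include <algorithm>
#include <cassert>
#include <optional>

struct BufferHighWaterMarks {
    double highestPresentationTimestamp;
    double highestBufferedEndTime;
};

// Returns the effective new duration, or nullopt for the InvalidStateError case.
static std::optional<double> changeDuration(double newDuration, const BufferHighWaterMarks& marks)
{
    if (newDuration < marks.highestPresentationTimestamp)
        return std::nullopt;
    return std::max(newDuration, marks.highestBufferedEndTime);
}

int main()
{
    BufferHighWaterMarks marks { 8.0, 10.0 };
    assert(!changeDuration(5.0, marks));       // below buffered frames: error
    assert(*changeDuration(9.0, marks) == 10); // raised to highest end time
}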
void MediaSource::setReadyState(const AtomicString& state)
{
ASSERT(state == openKeyword() || state == closedKeyword() || state == endedKeyword());
AtomicString oldState = readyState();
- LOG(Media, "MediaSource::setReadyState() %p : %s -> %s", this, oldState.string().ascii().data(), state.string().ascii().data());
-
- if (state == closedKeyword()) {
- m_private.clear();
- m_mediaElement = 0;
- }
+ LOG(MediaSource, "MediaSource::setReadyState(%p) : %s -> %s", this, oldState.string().ascii().data(), state.string().ascii().data());
if (oldState == state)
return;
@@ -304,55 +543,52 @@ void MediaSource::setReadyState(const AtomicString& state)
onReadyStateChange(oldState, state);
}
-static bool SourceBufferIsUpdating(RefPtr<SourceBuffer>& sourceBuffer)
-{
- return sourceBuffer->updating();
-}
-
-void MediaSource::endOfStream(const AtomicString& error, ExceptionCode& ec)
+ExceptionOr<void> MediaSource::endOfStream(std::optional<EndOfStreamError> error)
{
// 2.2 https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#widl-MediaSource-endOfStream-void-EndOfStreamError-error
// 1. If the readyState attribute is not in the "open" state then throw an
// INVALID_STATE_ERR exception and abort these steps.
- if (!isOpen()) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (!isOpen())
+ return Exception { INVALID_STATE_ERR };
// 2. If the updating attribute equals true on any SourceBuffer in sourceBuffers, then throw an
// INVALID_STATE_ERR exception and abort these steps.
- if (std::any_of(m_sourceBuffers->begin(), m_sourceBuffers->end(), SourceBufferIsUpdating)) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (std::any_of(m_sourceBuffers->begin(), m_sourceBuffers->end(), [](auto& sourceBuffer) { return sourceBuffer->updating(); }))
+ return Exception { INVALID_STATE_ERR };
// 3. Run the end of stream algorithm with the error parameter set to error.
- streamEndedWithError(error, ec);
+ streamEndedWithError(error);
+
+ return { };
}
-void MediaSource::streamEndedWithError(const AtomicString& error, ExceptionCode& ec)
+void MediaSource::streamEndedWithError(std::optional<EndOfStreamError> error)
{
- DEFINE_STATIC_LOCAL(const AtomicString, network, ("network", AtomicString::ConstructFromLiteral));
- DEFINE_STATIC_LOCAL(const AtomicString, decode, ("decode", AtomicString::ConstructFromLiteral));
+ LOG(MediaSource, "MediaSource::streamEndedWithError(%p)", this);
+ if (isClosed())
+ return;
// 2.4.7 https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#end-of-stream-algorithm
+
// 1. Change the readyState attribute value to "ended".
// 2. Queue a task to fire a simple event named sourceended at the MediaSource.
setReadyState(endedKeyword());
// 3.
- if (error.isEmpty()) {
+ if (!error) {
// ↳ If error is not set, is null, or is an empty string
- // 1. Run the duration change algorithm with new duration set to the highest end timestamp
- // across all SourceBuffer objects in sourceBuffers.
- MediaTime maxEndTimestamp;
- for (auto it = m_sourceBuffers->begin(), end = m_sourceBuffers->end(); it != end; ++it)
- maxEndTimestamp = std::max((*it)->highestPresentationEndTimestamp(), maxEndTimestamp);
- m_private->setDuration(maxEndTimestamp.toDouble());
+ // 1. Run the duration change algorithm with new duration set to the highest end time reported by
+ // the buffered attribute across all SourceBuffer objects in sourceBuffers.
+ MediaTime maxEndTime;
+ for (auto& sourceBuffer : *m_sourceBuffers) {
+ if (auto length = sourceBuffer->bufferedInternal().length())
+ maxEndTime = std::max(sourceBuffer->bufferedInternal().ranges().end(length - 1), maxEndTime);
+ }
+ setDurationInternal(maxEndTime);
// 2. Notify the media element that it now has all of the media data.
m_private->markEndOfStream(MediaSourcePrivate::EosNoError);
- } else if (error == network) {
+ } else if (error == EndOfStreamError::Network) {
// ↳ If error is set to "network"
ASSERT(m_mediaElement);
if (m_mediaElement->readyState() == HTMLMediaElement::HAVE_NOTHING) {
@@ -368,8 +604,9 @@ void MediaSource::streamEndedWithError(const AtomicString& error, ExceptionCode&
// NOTE: This step is handled by HTMLMediaElement::mediaLoadingFailedFatally().
m_mediaElement->mediaLoadingFailedFatally(MediaPlayer::NetworkError);
}
- } else if (error == decode) {
+ } else {
// ↳ If error is set to "decode"
+ ASSERT(error == EndOfStreamError::Decode);
ASSERT(m_mediaElement);
if (m_mediaElement->readyState() == HTMLMediaElement::HAVE_NOTHING) {
// ↳ If the HTMLMediaElement.readyState attribute equals HAVE_NOTHING
@@ -383,101 +620,98 @@ void MediaSource::streamEndedWithError(const AtomicString& error, ExceptionCode&
// NOTE: This step is handled by HTMLMediaElement::mediaLoadingFailedFatally().
m_mediaElement->mediaLoadingFailedFatally(MediaPlayer::DecodeError);
}
- } else {
- // ↳ Otherwise
- // Throw an INVALID_ACCESS_ERR exception.
- ec = INVALID_ACCESS_ERR;
}
}
-SourceBuffer* MediaSource::addSourceBuffer(const String& type, ExceptionCode& ec)
+ExceptionOr<SourceBuffer&> MediaSource::addSourceBuffer(const String& type)
{
- LOG(Media, "MediaSource::addSourceBuffer(%s) %p", type.ascii().data(), this);
+ LOG(MediaSource, "MediaSource::addSourceBuffer(%s) %p", type.ascii().data(), this);
- // 2.2 https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-MediaSource-addSourceBuffer-SourceBuffer-DOMString-type
- // 1. If type is null or an empty then throw an INVALID_ACCESS_ERR exception and
- // abort these steps.
- if (type.isNull() || type.isEmpty()) {
- ec = INVALID_ACCESS_ERR;
- return nullptr;
- }
+ // 2.2 http://www.w3.org/TR/media-source/#widl-MediaSource-addSourceBuffer-SourceBuffer-DOMString-type
+ // When this method is invoked, the user agent must run the following steps:
+
+ // 1. If type is an empty string then throw a TypeError exception and abort these steps.
+ if (type.isEmpty())
+ return Exception { TypeError };
// 2. If type contains a MIME type that is not supported ..., then throw a
// NOT_SUPPORTED_ERR exception and abort these steps.
- if (!isTypeSupported(type)) {
- ec = NOT_SUPPORTED_ERR;
- return nullptr;
- }
+ if (!isTypeSupported(type))
+ return Exception { NOT_SUPPORTED_ERR };
// 4. If the readyState attribute is not in the "open" state then throw an
// INVALID_STATE_ERR exception and abort these steps.
- if (!isOpen()) {
- ec = INVALID_STATE_ERR;
- return nullptr;
- }
+ if (!isOpen())
+ return Exception { INVALID_STATE_ERR };
// 5. Create a new SourceBuffer object and associated resources.
ContentType contentType(type);
- RefPtr<SourceBufferPrivate> sourceBufferPrivate = createSourceBufferPrivate(contentType, ec);
+ auto sourceBufferPrivate = createSourceBufferPrivate(contentType);
- if (!sourceBufferPrivate) {
- ASSERT(ec == NOT_SUPPORTED_ERR || ec == QUOTA_EXCEEDED_ERR);
+ if (sourceBufferPrivate.hasException()) {
// 2. If type contains a MIME type that is not supported ..., then throw a NOT_SUPPORTED_ERR exception and abort these steps.
// 3. If the user agent can't handle any more SourceBuffer objects then throw a QUOTA_EXCEEDED_ERR exception and abort these steps
- return nullptr;
+ return sourceBufferPrivate.releaseException();
}
- RefPtr<SourceBuffer> buffer = SourceBuffer::create(sourceBufferPrivate.releaseNonNull(), this);
- // 6. Add the new object to sourceBuffers and fire a addsourcebuffer on that object.
- m_sourceBuffers->add(buffer);
- m_activeSourceBuffers->add(buffer);
- // 7. Return the new object to the caller.
- return buffer.get();
+ auto buffer = SourceBuffer::create(sourceBufferPrivate.releaseReturnValue(), this);
+
+ // 6. Set the generate timestamps flag on the new object to the value in the "Generate Timestamps Flag"
+ // column of the byte stream format registry [MSE-REGISTRY] entry that is associated with type.
+ // NOTE: In the current byte stream format registry <http://www.w3.org/2013/12/byte-stream-format-registry/>
+ // only the "MPEG Audio Byte Stream Format" has the "Generate Timestamps Flag" value set.
+ bool shouldGenerateTimestamps = contentType.type() == "audio/aac" || contentType.type() == "audio/mpeg";
+ buffer->setShouldGenerateTimestamps(shouldGenerateTimestamps);
+
+ // 7. If the generate timestamps flag equals true:
+ // ↳ Set the mode attribute on the new object to "sequence".
+ // Otherwise:
+ // ↳ Set the mode attribute on the new object to "segments".
+ buffer->setMode(shouldGenerateTimestamps ? SourceBuffer::AppendMode::Sequence : SourceBuffer::AppendMode::Segments);
+
+ auto& result = buffer.get();
+
+ // 8. Add the new object to sourceBuffers and fire an addsourcebuffer event at that object.
+ m_sourceBuffers->add(WTFMove(buffer));
+ regenerateActiveSourceBuffers();
+
+ // 9. Return the new object to the caller.
+ return result;
}
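Steps 6-7 of addSourceBuffer tie the initial append mode to the byte stream format: only the MPEG audio formats set the Generate Timestamps Flag in the registry, and that flag forces "sequence" mode. A small table-in-code sketch of the same decision:

#include <cassert>
#include <string>

enum class AppendMode { Segments, Sequence };

struct NewBufferConfig {
    bool generateTimestamps;
    AppendMode mode;
};

static NewBufferConfig configureForType(const std::string& mimeType)
{
    // Per the MSE byte stream format registry, only the MPEG audio byte
    // stream formats carry the Generate Timestamps Flag.
    bool generate = mimeType == "audio/aac" || mimeType == "audio/mpeg";
    return { generate, generate ? AppendMode::Sequence : AppendMode::Segments };
}

int main()
{
    assert(configureForType("audio/aac").mode == AppendMode::Sequence);
    assert(configureForType("video/mp4").mode == AppendMode::Segments);
}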
-void MediaSource::removeSourceBuffer(SourceBuffer* buffer, ExceptionCode& ec)
+ExceptionOr<void> MediaSource::removeSourceBuffer(SourceBuffer& buffer)
{
- LOG(Media, "MediaSource::removeSourceBuffer() %p", this);
- RefPtr<SourceBuffer> protect(buffer);
-
- // 2.2 https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-MediaSource-removeSourceBuffer-void-SourceBuffer-sourceBuffer
- // 1. If sourceBuffer is null then throw an INVALID_ACCESS_ERR exception and
- // abort these steps.
- if (!buffer) {
- ec = INVALID_ACCESS_ERR;
- return;
- }
+ LOG(MediaSource, "MediaSource::removeSourceBuffer() %p", this);
+ Ref<SourceBuffer> protect(buffer);
// 2. If sourceBuffer specifies an object that is not in sourceBuffers then
// throw a NOT_FOUND_ERR exception and abort these steps.
- if (!m_sourceBuffers->length() || !m_sourceBuffers->contains(buffer)) {
- ec = NOT_FOUND_ERR;
- return;
- }
+ if (!m_sourceBuffers->length() || !m_sourceBuffers->contains(buffer))
+ return Exception { NOT_FOUND_ERR };
// 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
- buffer->abortIfUpdating();
+ buffer.abortIfUpdating();
// 4. Let SourceBuffer audioTracks list equal the AudioTrackList object returned by sourceBuffer.audioTracks.
- RefPtr<AudioTrackList> audioTracks = buffer->audioTracks();
+ auto& audioTracks = buffer.audioTracks();
// 5. If the SourceBuffer audioTracks list is not empty, then run the following steps:
- if (audioTracks->length()) {
+ if (audioTracks.length()) {
// 5.1 Let HTMLMediaElement audioTracks list equal the AudioTrackList object returned by the audioTracks
// attribute on the HTMLMediaElement.
// 5.2 Let the removed enabled audio track flag equal false.
bool removedEnabledAudioTrack = false;
// 5.3 For each AudioTrack object in the SourceBuffer audioTracks list, run the following steps:
- while (audioTracks->length()) {
- AudioTrack* track = audioTracks->lastItem();
+ while (audioTracks.length()) {
+ auto& track = *audioTracks.lastItem();
// 5.3.1 Set the sourceBuffer attribute on the AudioTrack object to null.
- track->setSourceBuffer(0);
+ track.setSourceBuffer(nullptr);
// 5.3.2 If the enabled attribute on the AudioTrack object is true, then set the removed enabled
// audio track flag to true.
- if (track->enabled())
+ if (track.enabled())
removedEnabledAudioTrack = true;
// 5.3.3 Remove the AudioTrack object from the HTMLMediaElement audioTracks list.
@@ -489,35 +723,35 @@ void MediaSource::removeSourceBuffer(SourceBuffer* buffer, ExceptionCode& ec)
// 5.3.5 Remove the AudioTrack object from the SourceBuffer audioTracks list.
// 5.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
// cancelable, and that uses the TrackEvent interface, at the SourceBuffer audioTracks list.
- audioTracks->remove(track);
+ audioTracks.remove(track);
}
// 5.4 If the removed enabled audio track flag equals true, then queue a task to fire a simple event
// named change at the HTMLMediaElement audioTracks list.
if (removedEnabledAudioTrack)
- mediaElement()->audioTracks()->scheduleChangeEvent();
+ mediaElement()->audioTracks().scheduleChangeEvent();
}
// 6. Let SourceBuffer videoTracks list equal the VideoTrackList object returned by sourceBuffer.videoTracks.
- RefPtr<VideoTrackList> videoTracks = buffer->videoTracks();
+ auto& videoTracks = buffer.videoTracks();
// 7. If the SourceBuffer videoTracks list is not empty, then run the following steps:
- if (videoTracks->length()) {
+ if (videoTracks.length()) {
// 7.1 Let HTMLMediaElement videoTracks list equal the VideoTrackList object returned by the videoTracks
// attribute on the HTMLMediaElement.
// 7.2 Let the removed selected video track flag equal false.
bool removedSelectedVideoTrack = false;
// 7.3 For each VideoTrack object in the SourceBuffer videoTracks list, run the following steps:
- while (videoTracks->length()) {
- VideoTrack* track = videoTracks->lastItem();
+ while (videoTracks.length()) {
+ auto& track = *videoTracks.lastItem();
// 7.3.1 Set the sourceBuffer attribute on the VideoTrack object to null.
- track->setSourceBuffer(0);
+ track.setSourceBuffer(nullptr);
// 7.3.2 If the selected attribute on the VideoTrack object is true, then set the removed selected
// video track flag to true.
- if (track->selected())
+ if (track.selected())
removedSelectedVideoTrack = true;
// 7.3.3 Remove the VideoTrack object from the HTMLMediaElement videoTracks list.
@@ -529,35 +763,35 @@ void MediaSource::removeSourceBuffer(SourceBuffer* buffer, ExceptionCode& ec)
// 7.3.5 Remove the VideoTrack object from the SourceBuffer videoTracks list.
// 7.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
// cancelable, and that uses the TrackEvent interface, at the SourceBuffer videoTracks list.
- videoTracks->remove(track);
+ videoTracks.remove(track);
}
// 7.4 If the removed selected video track flag equals true, then queue a task to fire a simple event
// named change at the HTMLMediaElement videoTracks list.
if (removedSelectedVideoTrack)
- mediaElement()->videoTracks()->scheduleChangeEvent();
+ mediaElement()->videoTracks().scheduleChangeEvent();
}
// 8. Let SourceBuffer textTracks list equal the TextTrackList object returned by sourceBuffer.textTracks.
- RefPtr<TextTrackList> textTracks = buffer->textTracks();
+ auto& textTracks = buffer.textTracks();
// 9. If the SourceBuffer textTracks list is not empty, then run the following steps:
- if (textTracks->length()) {
+ if (textTracks.length()) {
// 9.1 Let HTMLMediaElement textTracks list equal the TextTrackList object returned by the textTracks
// attribute on the HTMLMediaElement.
// 9.2 Let the removed enabled text track flag equal false.
bool removedEnabledTextTrack = false;
// 9.3 For each TextTrack object in the SourceBuffer textTracks list, run the following steps:
- while (textTracks->length()) {
- TextTrack* track = textTracks->lastItem();
+ while (textTracks.length()) {
+ auto& track = *textTracks.lastItem();
// 9.3.1 Set the sourceBuffer attribute on the TextTrack object to null.
- track->setSourceBuffer(0);
+ track.setSourceBuffer(nullptr);
// 9.3.2 If the mode attribute on the TextTrack object is set to "showing" or "hidden", then
// set the removed enabled text track flag to true.
- if (track->mode() == TextTrack::showingKeyword() || track->mode() == TextTrack::hiddenKeyword())
+ if (track.mode() == TextTrack::Mode::Showing || track.mode() == TextTrack::Mode::Hidden)
removedEnabledTextTrack = true;
// 9.3.3 Remove the TextTrack object from the HTMLMediaElement textTracks list.
@@ -569,30 +803,31 @@ void MediaSource::removeSourceBuffer(SourceBuffer* buffer, ExceptionCode& ec)
// 9.3.5 Remove the TextTrack object from the SourceBuffer textTracks list.
// 9.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
// cancelable, and that uses the TrackEvent interface, at the SourceBuffer textTracks list.
- textTracks->remove(track);
+ textTracks.remove(track);
}
-
+
// 9.4 If the removed enabled text track flag equals true, then queue a task to fire a simple event
// named change at the HTMLMediaElement textTracks list.
if (removedEnabledTextTrack)
- mediaElement()->textTracks()->scheduleChangeEvent();
+ mediaElement()->textTracks().scheduleChangeEvent();
}
-
-
+
// 10. If sourceBuffer is in activeSourceBuffers, then remove sourceBuffer from activeSourceBuffers ...
m_activeSourceBuffers->remove(buffer);
-
+
// 11. Remove sourceBuffer from sourceBuffers and fire a removesourcebuffer event
// on that object.
m_sourceBuffers->remove(buffer);
-
+
// 12. Destroy all resources for sourceBuffer.
- buffer->removedFromMediaSource();
+ buffer.removedFromMediaSource();
+
+ return { };
}
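Steps 4-9 of removeSourceBuffer run the same teardown loop three times, once per track kind: detach every track, remember whether an enabled (or selected, or showing/hidden) one went away, and fire a single change event if so. The shared shape, sketched generically over a stand-in Track type:

#include <vector>

struct Track { bool enabled; };

// Detach every track, remembering whether an enabled one was removed so
// the caller can schedule the change event exactly once.
static bool detachAllTracks(std::vector<Track>& tracks)
{
    bool removedEnabledTrack = false;
    while (!tracks.empty()) {
        if (tracks.back().enabled)
            removedEnabledTrack = true;
        tracks.pop_back(); // stands in for setSourceBuffer(nullptr) + both list removals
    }
    return removedEnabledTrack;
}

int main()
{
    std::vector<Track> tracks { { false }, { true } };
    return detachAllTracks(tracks) ? 0 : 1;
}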
bool MediaSource::isTypeSupported(const String& type)
{
- LOG(Media, "MediaSource::isTypeSupported(%s)", type.ascii().data());
+ LOG(MediaSource, "MediaSource::isTypeSupported(%s)", type.ascii().data());
// Section 2.2 isTypeSupported() method steps.
// https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#widl-MediaSource-isTypeSupported-boolean-DOMString-type
@@ -600,11 +835,12 @@ bool MediaSource::isTypeSupported(const String& type)
if (type.isNull() || type.isEmpty())
return false;
- ContentType contentType(type);
+ // FIXME: Why do we convert to lowercase here, but not in MediaSource::addSourceBuffer?
+ ContentType contentType(type.convertToASCIILowercase());
String codecs = contentType.parameter("codecs");
// 2. If type does not contain a valid MIME type string, then return false.
- if (contentType.type().isEmpty() || codecs.isEmpty())
+ if (contentType.type().isEmpty())
return false;
// 3. If type contains a media type or media subtype that the MediaSource does not support, then return false.
@@ -615,7 +851,12 @@ bool MediaSource::isTypeSupported(const String& type)
parameters.type = contentType.type();
parameters.codecs = codecs;
parameters.isMediaSource = true;
- return MediaPlayer::supportsType(parameters, 0) != MediaPlayer::IsNotSupported;
+ MediaPlayer::SupportsType supported = MediaPlayer::supportsType(parameters, 0);
+
+ if (codecs.isEmpty())
+ return supported != MediaPlayer::IsNotSupported;
+
+ return supported == MediaPlayer::IsSupported;
}
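The tail of isTypeSupported encodes a deliberate asymmetry: without a codecs parameter the engine's "maybe" answer passes, but once codecs are named only a definitive "supported" will do. Reduced to its decision table:

#include <cassert>

enum class SupportsType { IsNotSupported, MayBeSupported, IsSupported };

static bool typeSupported(SupportsType answer, bool hasCodecs)
{
    if (!hasCodecs)
        return answer != SupportsType::IsNotSupported; // "maybe" is enough
    return answer == SupportsType::IsSupported;        // codecs demand certainty
}

int main()
{
    assert(typeSupported(SupportsType::MayBeSupported, false));
    assert(!typeSupported(SupportsType::MayBeSupported, true));
}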
bool MediaSource::isOpen() const
@@ -628,27 +869,52 @@ bool MediaSource::isClosed() const
return readyState() == closedKeyword();
}
-void MediaSource::close()
+bool MediaSource::isEnded() const
{
+ return readyState() == endedKeyword();
+}
+
+void MediaSource::detachFromElement(HTMLMediaElement& element)
+{
+ ASSERT_UNUSED(element, m_mediaElement == &element);
+
+ // 2.4.2 Detaching from a media element
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#mediasource-detach
+
+ // 1. Set the readyState attribute to "closed".
+ // 7. Queue a task to fire a simple event named sourceclose at the MediaSource.
setReadyState(closedKeyword());
+
+ // 2. Update duration to NaN.
+ m_duration = MediaTime::invalidTime();
+
+ // 3. Remove all the SourceBuffer objects from activeSourceBuffers.
+ // 4. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers.
+ while (m_activeSourceBuffers->length())
+ removeSourceBuffer(*m_activeSourceBuffers->item(0));
+
+ // 5. Remove all the SourceBuffer objects from sourceBuffers.
+ // 6. Queue a task to fire a simple event named removesourcebuffer at sourceBuffers.
+ while (m_sourceBuffers->length())
+ removeSourceBuffer(*m_sourceBuffers->item(0));
+
+ m_private = nullptr;
+ m_mediaElement = nullptr;
}
-void MediaSource::sourceBufferDidChangeAcitveState(SourceBuffer* sourceBuffer, bool active)
+void MediaSource::sourceBufferDidChangeActiveState(SourceBuffer&, bool)
{
- if (active && !m_activeSourceBuffers->contains(sourceBuffer))
- m_activeSourceBuffers->add(sourceBuffer);
- else if (!active && m_activeSourceBuffers->contains(sourceBuffer))
- m_activeSourceBuffers->remove(sourceBuffer);
+ regenerateActiveSourceBuffers();
}
-bool MediaSource::attachToElement(HTMLMediaElement* element)
+bool MediaSource::attachToElement(HTMLMediaElement& element)
{
if (m_mediaElement)
return false;
ASSERT(isClosed());
- m_mediaElement = element;
+ m_mediaElement = &element;
return true;
}
@@ -670,13 +936,27 @@ bool MediaSource::hasPendingActivity() const
void MediaSource::stop()
{
m_asyncEventQueue.close();
- if (!isClosed())
- setReadyState(closedKeyword());
- m_private.clear();
+ if (m_mediaElement)
+ m_mediaElement->detachMediaSource();
+ m_readyState = closedKeyword();
+ m_private = nullptr;
+}
+
+bool MediaSource::canSuspendForDocumentSuspension() const
+{
+ return isClosed() && !m_asyncEventQueue.hasPendingEvents();
+}
+
+const char* MediaSource::activeDOMObjectName() const
+{
+ return "MediaSource";
}
void MediaSource::onReadyStateChange(const AtomicString& oldState, const AtomicString& newState)
{
+ for (auto& buffer : *m_sourceBuffers)
+ buffer->readyStateChanged();
+
if (isOpen()) {
scheduleEvent(eventNames().sourceopenEvent);
return;
@@ -688,58 +968,46 @@ void MediaSource::onReadyStateChange(const AtomicString& oldState, const AtomicS
}
ASSERT(isClosed());
-
- m_activeSourceBuffers->clear();
-
- // Clear SourceBuffer references to this object.
- for (unsigned long i = 0, length = m_sourceBuffers->length(); i < length; ++i)
- m_sourceBuffers->item(i)->removedFromMediaSource();
- m_sourceBuffers->clear();
-
scheduleEvent(eventNames().sourcecloseEvent);
}
-Vector<RefPtr<TimeRanges>> MediaSource::activeRanges() const
+Vector<PlatformTimeRanges> MediaSource::activeRanges() const
{
- Vector<RefPtr<TimeRanges>> activeRanges(m_activeSourceBuffers->length());
- for (size_t i = 0, length = m_activeSourceBuffers->length(); i < length; ++i)
- activeRanges[i] = m_activeSourceBuffers->item(i)->buffered(ASSERT_NO_EXCEPTION);
-
+ Vector<PlatformTimeRanges> activeRanges;
+ for (auto& sourceBuffer : *m_activeSourceBuffers)
+ activeRanges.append(sourceBuffer->bufferedInternal().ranges());
return activeRanges;
}
-RefPtr<SourceBufferPrivate> MediaSource::createSourceBufferPrivate(const ContentType& type, ExceptionCode& ec)
+ExceptionOr<Ref<SourceBufferPrivate>> MediaSource::createSourceBufferPrivate(const ContentType& type)
{
RefPtr<SourceBufferPrivate> sourceBufferPrivate;
switch (m_private->addSourceBuffer(type, sourceBufferPrivate)) {
- case MediaSourcePrivate::Ok: {
- return sourceBufferPrivate;
- }
+ case MediaSourcePrivate::Ok:
+ return sourceBufferPrivate.releaseNonNull();
case MediaSourcePrivate::NotSupported:
// 2.2 https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-MediaSource-addSourceBuffer-SourceBuffer-DOMString-type
// Step 2: If type contains a MIME type ... that is not supported with the types
// specified for the other SourceBuffer objects in sourceBuffers, then throw
// a NOT_SUPPORTED_ERR exception and abort these steps.
- ec = NOT_SUPPORTED_ERR;
- return nullptr;
+ return Exception { NOT_SUPPORTED_ERR };
case MediaSourcePrivate::ReachedIdLimit:
// 2.2 https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-MediaSource-addSourceBuffer-SourceBuffer-DOMString-type
// Step 3: If the user agent can't handle any more SourceBuffer objects then throw
// a QUOTA_EXCEEDED_ERR exception and abort these steps.
- ec = QUOTA_EXCEEDED_ERR;
- return nullptr;
+ return Exception { QUOTA_EXCEEDED_ERR };
}
ASSERT_NOT_REACHED();
- return nullptr;
+ return Exception { QUOTA_EXCEEDED_ERR };
}
void MediaSource::scheduleEvent(const AtomicString& eventName)
{
- RefPtr<Event> event = Event::create(eventName, false, false);
+ auto event = Event::create(eventName, false, false);
event->setTarget(this);
- m_asyncEventQueue.enqueueEvent(event.release());
+ m_asyncEventQueue.enqueueEvent(WTFMove(event));
}
ScriptExecutionContext* MediaSource::scriptExecutionContext() const
@@ -757,6 +1025,18 @@ URLRegistry& MediaSource::registry() const
return MediaSourceRegistry::registry();
}
+void MediaSource::regenerateActiveSourceBuffers()
+{
+ Vector<RefPtr<SourceBuffer>> newList;
+ for (auto& sourceBuffer : *m_sourceBuffers) {
+ if (sourceBuffer->active())
+ newList.append(sourceBuffer);
+ }
+ m_activeSourceBuffers->swap(newList);
+ for (auto& sourceBuffer : *m_activeSourceBuffers)
+ sourceBuffer->setBufferedDirty(true);
+}
+
}
#endif
diff --git a/Source/WebCore/Modules/mediasource/MediaSource.h b/Source/WebCore/Modules/mediasource/MediaSource.h
index 131b549d0..549319f12 100644
--- a/Source/WebCore/Modules/mediasource/MediaSource.h
+++ b/Source/WebCore/Modules/mediasource/MediaSource.h
@@ -28,99 +28,125 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaSource_h
-#define MediaSource_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
#include "ActiveDOMObject.h"
#include "EventTarget.h"
+#include "ExceptionOr.h"
#include "GenericEventQueue.h"
-#include "HTMLMediaSource.h"
-#include "MediaSourcePrivate.h"
-#include "ScriptWrappable.h"
-#include "SourceBuffer.h"
-#include "SourceBufferList.h"
+#include "MediaSourcePrivateClient.h"
#include "URLRegistry.h"
-#include <wtf/PassOwnPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Vector.h>
namespace WebCore {
-class GenericEventQueue;
+class ContentType;
+class HTMLMediaElement;
+class SourceBuffer;
+class SourceBufferList;
+class SourceBufferPrivate;
+class TimeRanges;
-class MediaSource : public RefCounted<MediaSource>, public HTMLMediaSource, public ActiveDOMObject, public EventTargetWithInlineData, public ScriptWrappable {
+class MediaSource final : public MediaSourcePrivateClient, public ActiveDOMObject, public EventTargetWithInlineData, public URLRegistrable {
public:
- static const AtomicString& openKeyword();
- static const AtomicString& closedKeyword();
- static const AtomicString& endedKeyword();
+ static void setRegistry(URLRegistry*);
+ static MediaSource* lookup(const String& url) { return s_registry ? static_cast<MediaSource*>(s_registry->lookup(url)) : nullptr; }
- static PassRefPtr<MediaSource> create(ScriptExecutionContext&);
+ static Ref<MediaSource> create(ScriptExecutionContext&);
virtual ~MediaSource();
void addedToRegistry();
void removedFromRegistry();
void openIfInEndedState();
bool isOpen() const;
- void sourceBufferDidChangeAcitveState(SourceBuffer*, bool);
- void streamEndedWithError(const AtomicString& error, ExceptionCode&);
-
- // HTMLMediaSource
- virtual bool attachToElement(HTMLMediaElement*) override;
- virtual void setPrivateAndOpen(PassRef<MediaSourcePrivate>) override;
- virtual void close() override;
- virtual bool isClosed() const override;
- virtual double duration() const override;
- virtual PassRefPtr<TimeRanges> buffered() const override;
- virtual void refHTMLMediaSource() override { ref(); }
- virtual void derefHTMLMediaSource() override { deref(); }
- virtual void monitorSourceBuffers() override;
-
- void setDuration(double, ExceptionCode&);
+ bool isClosed() const;
+ bool isEnded() const;
+ void sourceBufferDidChangeActiveState(SourceBuffer&, bool);
+
+ enum class EndOfStreamError { Network, Decode };
+ void streamEndedWithError(std::optional<EndOfStreamError>);
+
+ MediaTime duration() const final;
+ void durationChanged(const MediaTime&) final;
+ std::unique_ptr<PlatformTimeRanges> buffered() const final;
+
+ bool attachToElement(HTMLMediaElement&);
+ void detachFromElement(HTMLMediaElement&);
+ void monitorSourceBuffers() override;
+ bool isSeeking() const { return m_pendingSeekTime.isValid(); }
+ Ref<TimeRanges> seekable();
+ ExceptionOr<void> setLiveSeekableRange(double start, double end);
+ ExceptionOr<void> clearLiveSeekableRange();
+
+ ExceptionOr<void> setDuration(double);
+ ExceptionOr<void> setDurationInternal(const MediaTime&);
+ MediaTime currentTime() const;
const AtomicString& readyState() const { return m_readyState; }
- void setReadyState(const AtomicString&);
- void endOfStream(const AtomicString& error, ExceptionCode&);
+ ExceptionOr<void> endOfStream(std::optional<EndOfStreamError>);
HTMLMediaElement* mediaElement() const { return m_mediaElement; }
- // MediaSource.idl methods
SourceBufferList* sourceBuffers() { return m_sourceBuffers.get(); }
SourceBufferList* activeSourceBuffers() { return m_activeSourceBuffers.get(); }
- SourceBuffer* addSourceBuffer(const String& type, ExceptionCode&);
- void removeSourceBuffer(SourceBuffer*, ExceptionCode&);
+ ExceptionOr<SourceBuffer&> addSourceBuffer(const String& type);
+ ExceptionOr<void> removeSourceBuffer(SourceBuffer&);
static bool isTypeSupported(const String& type);
- // ActiveDOMObject interface
- virtual bool hasPendingActivity() const override;
- virtual void stop() override;
+ ScriptExecutionContext* scriptExecutionContext() const final;
- // EventTarget interface
- virtual ScriptExecutionContext* scriptExecutionContext() const override final;
- virtual void refEventTarget() override final { ref(); }
- virtual void derefEventTarget() override final { deref(); }
- virtual EventTargetInterface eventTargetInterface() const override;
+ using RefCounted::ref;
+ using RefCounted::deref;
- // URLRegistrable interface
- virtual URLRegistry& registry() const override;
+ bool hasPendingActivity() const final;
- using RefCounted<MediaSource>::ref;
- using RefCounted<MediaSource>::deref;
+ static const MediaTime& currentTimeFudgeFactor();
-protected:
+private:
explicit MediaSource(ScriptExecutionContext&);
+ void stop() final;
+ bool canSuspendForDocumentSuspension() const final;
+ const char* activeDOMObjectName() const final;
+
+ void setPrivateAndOpen(Ref<MediaSourcePrivate>&&) final;
+ void seekToTime(const MediaTime&) final;
+
+ void refEventTarget() final { ref(); }
+ void derefEventTarget() final { deref(); }
+ EventTargetInterface eventTargetInterface() const final;
+
+ URLRegistry& registry() const final;
+
+ static const AtomicString& openKeyword();
+ static const AtomicString& closedKeyword();
+ static const AtomicString& endedKeyword();
+ void setReadyState(const AtomicString&);
void onReadyStateChange(const AtomicString& oldState, const AtomicString& newState);
- Vector<RefPtr<TimeRanges>> activeRanges() const;
- RefPtr<SourceBufferPrivate> createSourceBufferPrivate(const ContentType&, ExceptionCode&);
+ Vector<PlatformTimeRanges> activeRanges() const;
+
+ ExceptionOr<Ref<SourceBufferPrivate>> createSourceBufferPrivate(const ContentType&);
void scheduleEvent(const AtomicString& eventName);
- GenericEventQueue& asyncEventQueue() { return m_asyncEventQueue; }
+
+ bool hasBufferedTime(const MediaTime&);
+ bool hasCurrentTime();
+ bool hasFutureTime();
+
+ void regenerateActiveSourceBuffers();
+
+ void completeSeek();
+
+ static URLRegistry* s_registry;
RefPtr<MediaSourcePrivate> m_private;
RefPtr<SourceBufferList> m_sourceBuffers;
RefPtr<SourceBufferList> m_activeSourceBuffers;
- HTMLMediaElement* m_mediaElement;
+ mutable std::unique_ptr<PlatformTimeRanges> m_buffered;
+ std::unique_ptr<PlatformTimeRanges> m_liveSeekable;
+ HTMLMediaElement* m_mediaElement { nullptr };
+ MediaTime m_duration;
+ MediaTime m_pendingSeekTime;
AtomicString m_readyState;
GenericEventQueue m_asyncEventQueue;
};
@@ -128,5 +154,3 @@ protected:
}
#endif
-
-#endif
diff --git a/Source/WebCore/Modules/mediasource/MediaSource.idl b/Source/WebCore/Modules/mediasource/MediaSource.idl
index 3cb8ebd87..5a9fef118 100644
--- a/Source/WebCore/Modules/mediasource/MediaSource.idl
+++ b/Source/WebCore/Modules/mediasource/MediaSource.idl
@@ -28,15 +28,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum EndOfStreamError {
+ "network",
+ "decode"
+};
+
[
- Conditional=MEDIA_SOURCE,
ActiveDOMObject,
- EventTarget,
- EnabledBySetting=MediaSource,
- JSGenerateToJSObject,
- JSGenerateToNativeObject,
+ Conditional=MEDIA_SOURCE,
Constructor,
ConstructorCallWith=ScriptExecutionContext,
+ EnabledBySetting=MediaSource,
] interface MediaSource : EventTarget {
// All the source buffers created by this object.
readonly attribute SourceBufferList sourceBuffers;
@@ -44,14 +46,21 @@
// Subset of sourceBuffers that provide data for the selected/enabled tracks.
readonly attribute SourceBufferList activeSourceBuffers;
- [SetterRaisesException] attribute double duration;
+ [SetterMayThrowException] attribute unrestricted double duration;
- [RaisesException] SourceBuffer addSourceBuffer(DOMString type);
- [RaisesException] void removeSourceBuffer(SourceBuffer buffer);
+ [MayThrowException] SourceBuffer addSourceBuffer(DOMString type);
+ [MayThrowException] void removeSourceBuffer(SourceBuffer buffer);
readonly attribute DOMString readyState;
- [RaisesException] void endOfStream([Default=NullString] optional DOMString error);
+ [MayThrowException] void endOfStream(optional EndOfStreamError error);
static boolean isTypeSupported (DOMString type);
+
+ [MayThrowException] void setLiveSeekableRange(double start, double end);
+ [MayThrowException] void clearLiveSeekableRange();
+
+ attribute EventHandler onsourceopen;
+ attribute EventHandler onsourceended;
+ attribute EventHandler onsourceclose;
};
diff --git a/Source/WebCore/Modules/mediasource/MediaSourceRegistry.cpp b/Source/WebCore/Modules/mediasource/MediaSourceRegistry.cpp
index a65308860..b6974c2ca 100644
--- a/Source/WebCore/Modules/mediasource/MediaSourceRegistry.cpp
+++ b/Source/WebCore/Modules/mediasource/MediaSourceRegistry.cpp
@@ -42,18 +42,18 @@ namespace WebCore {
MediaSourceRegistry& MediaSourceRegistry::registry()
{
ASSERT(isMainThread());
- DEFINE_STATIC_LOCAL(MediaSourceRegistry, instance, ());
+ static NeverDestroyed<MediaSourceRegistry> instance;
return instance;
}
-void MediaSourceRegistry::registerURL(SecurityOrigin*, const URL& url, URLRegistrable* registrable)
+void MediaSourceRegistry::registerURL(SecurityOrigin*, const URL& url, URLRegistrable& registrable)
{
- ASSERT(&registrable->registry() == this);
+ ASSERT(&registrable.registry() == this);
ASSERT(isMainThread());
- MediaSource* source = static_cast<MediaSource*>(registrable);
- source->addedToRegistry();
- m_mediaSources.set(url.string(), source);
+ MediaSource& source = static_cast<MediaSource&>(registrable);
+ source.addedToRegistry();
+ m_mediaSources.set(url.string(), &source);
}
void MediaSourceRegistry::unregisterURL(const URL& url)
@@ -76,7 +76,7 @@ URLRegistrable* MediaSourceRegistry::lookup(const String& url) const
MediaSourceRegistry::MediaSourceRegistry()
{
- HTMLMediaSource::setRegistry(this);
+ MediaSource::setRegistry(this);
}
} // namespace WebCore
diff --git a/Source/WebCore/Modules/mediasource/MediaSourceRegistry.h b/Source/WebCore/Modules/mediasource/MediaSourceRegistry.h
index 9974ea4af..eb87b40dc 100644
--- a/Source/WebCore/Modules/mediasource/MediaSourceRegistry.h
+++ b/Source/WebCore/Modules/mediasource/MediaSourceRegistry.h
@@ -28,14 +28,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaSourceRegistry_h
-#define MediaSourceRegistry_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
#include "URLRegistry.h"
#include <wtf/HashMap.h>
-#include <wtf/PassRefPtr.h>
+#include <wtf/NeverDestroyed.h>
#include <wtf/text/StringHash.h>
namespace WebCore {
@@ -44,14 +43,15 @@ class URL;
class MediaSource;
class MediaSourceRegistry final : public URLRegistry {
+ friend class NeverDestroyed<MediaSourceRegistry>;
public:
// Returns a single instance of MediaSourceRegistry.
static MediaSourceRegistry& registry();
// Registers a blob URL referring to the specified media source.
- virtual void registerURL(SecurityOrigin*, const URL&, URLRegistrable*) override;
- virtual void unregisterURL(const URL&) override;
- virtual URLRegistrable* lookup(const String&) const override;
+ void registerURL(SecurityOrigin*, const URL&, URLRegistrable&) override;
+ void unregisterURL(const URL&) override;
+ URLRegistrable* lookup(const String&) const override;
private:
MediaSourceRegistry();
@@ -60,5 +60,4 @@ private:
} // namespace WebCore
-#endif
-#endif
+#endif // ENABLE(MEDIA_SOURCE)
diff --git a/Source/WebCore/Modules/mediasource/SampleMap.cpp b/Source/WebCore/Modules/mediasource/SampleMap.cpp
index 6cdf37e77..42157c5fc 100644
--- a/Source/WebCore/Modules/mediasource/SampleMap.cpp
+++ b/Source/WebCore/Modules/mediasource/SampleMap.cpp
@@ -32,28 +32,32 @@
namespace WebCore {
+template <typename M>
class SampleIsLessThanMediaTimeComparator {
public:
- bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value, MediaTime time)
+ typedef typename M::value_type value_type;
+ bool operator()(const value_type& value, const MediaTime& time)
{
MediaTime presentationEndTime = value.second->presentationTime() + value.second->duration();
return presentationEndTime <= time;
}
- bool operator()(MediaTime time, std::pair<MediaTime, RefPtr<MediaSample>> value)
+ bool operator()(const MediaTime& time, const value_type& value)
{
MediaTime presentationStartTime = value.second->presentationTime();
return time < presentationStartTime;
}
};
+template <typename M>
class SampleIsGreaterThanMediaTimeComparator {
public:
- bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value, MediaTime time)
+ typedef typename M::value_type value_type;
+ bool operator()(const value_type& value, const MediaTime& time)
{
MediaTime presentationStartTime = value.second->presentationTime();
return presentationStartTime > time;
}
- bool operator()(MediaTime time, std::pair<MediaTime, RefPtr<MediaSample>> value)
+ bool operator()(const MediaTime& time, const value_type& value)
{
MediaTime presentationEndTime = value.second->presentationTime() + value.second->duration();
return time >= presentationEndTime;
@@ -62,125 +66,228 @@ public:
class SampleIsRandomAccess {
public:
- bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value)
+ bool operator()(DecodeOrderSampleMap::MapType::value_type& value)
{
return value.second->flags() == MediaSample::IsSync;
}
};
-class SamplePresentationTimeIsWithinRangeComparator {
-public:
- bool operator()(std::pair<MediaTime, MediaTime> range, std::pair<MediaTime, RefPtr<MediaSample>> value)
+// SamplePresentationTimeIsInsideRangeComparator matches (range.first, range.second]
+struct SamplePresentationTimeIsInsideRangeComparator {
+ bool operator()(std::pair<MediaTime, MediaTime> range, const std::pair<MediaTime, RefPtr<MediaSample>>& value)
{
return range.second < value.first;
}
- bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value, std::pair<MediaTime, MediaTime> range)
+ bool operator()(const std::pair<MediaTime, RefPtr<MediaSample>>& value, std::pair<MediaTime, MediaTime> range)
+ {
+ return value.first <= range.first;
+ }
+};
+
+// SamplePresentationTimeIsWithinRangeComparator matches [range.first, range.second)
+struct SamplePresentationTimeIsWithinRangeComparator {
+ bool operator()(std::pair<MediaTime, MediaTime> range, const std::pair<MediaTime, RefPtr<MediaSample>>& value)
+ {
+ return range.second <= value.first;
+ }
+ bool operator()(const std::pair<MediaTime, RefPtr<MediaSample>>& value, std::pair<MediaTime, MediaTime> range)
{
return value.first < range.first;
}
};
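
The two comparators differ only in which endpoint each interval keeps, and both provide the asymmetric element-vs-value and value-vs-element overloads that std::equal_range requires. A self-contained sketch of the same idea, with doubles standing in for MediaTime and a (start, payload) pair standing in for a sample:

    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    // Samples sorted by presentation start time; doubles stand in for MediaTime.
    using Sample = std::pair<double, const char*>; // (presentation start, payload)
    using Range = std::pair<double, double>;

    // Matches samples whose start time lies in (range.first, range.second].
    struct InsideRange {
        bool operator()(Range range, const Sample& s) const { return range.second < s.first; }
        bool operator()(const Sample& s, Range range) const { return s.first <= range.first; }
    };

    // Matches samples whose start time lies in [range.first, range.second).
    struct WithinRange {
        bool operator()(Range range, const Sample& s) const { return range.second <= s.first; }
        bool operator()(const Sample& s, Range range) const { return s.first < range.first; }
    };

    int main()
    {
        std::vector<Sample> samples { { 0, "a" }, { 1, "b" }, { 2, "c" } };

        // (0, 2] matches start times 1 and 2; [0, 2) matches 0 and 1.
        auto inside = std::equal_range(samples.begin(), samples.end(), Range { 0, 2 }, InsideRange());
        auto within = std::equal_range(samples.begin(), samples.end(), Range { 0, 2 }, WithinRange());
        assert(inside.second - inside.first == 2 && inside.first->first == 1);
        assert(within.second - within.first == 2 && within.first->first == 0);
    }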
-void SampleMap::addSample(PassRefPtr<MediaSample> prpSample)
+bool SampleMap::empty() const
{
- RefPtr<MediaSample> sample = prpSample;
- ASSERT(sample);
- m_presentationSamples.insert(MapType::value_type(sample->presentationTime(), sample));
- m_decodeSamples.insert(MapType::value_type(sample->decodeTime(), sample));
+ return presentationOrder().m_samples.empty();
+}
+
+void SampleMap::clear()
+{
+ presentationOrder().m_samples.clear();
+ decodeOrder().m_samples.clear();
+ m_totalSize = 0;
+}
+
+void SampleMap::addSample(MediaSample& sample)
+{
+ MediaTime presentationTime = sample.presentationTime();
+
+ presentationOrder().m_samples.insert(PresentationOrderSampleMap::MapType::value_type(presentationTime, &sample));
+
+ auto decodeKey = DecodeOrderSampleMap::KeyType(sample.decodeTime(), presentationTime);
+ decodeOrder().m_samples.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample));
+
+ m_totalSize += sample.sizeInBytes();
}
void SampleMap::removeSample(MediaSample* sample)
{
ASSERT(sample);
- m_presentationSamples.erase(sample->presentationTime());
- m_decodeSamples.erase(sample->decodeTime());
+ MediaTime presentationTime = sample->presentationTime();
+
+ m_totalSize -= sample->sizeInBytes();
+
+ auto decodeKey = DecodeOrderSampleMap::KeyType(sample->decodeTime(), presentationTime);
+ presentationOrder().m_samples.erase(presentationTime);
+ decodeOrder().m_samples.erase(decodeKey);
}
-SampleMap::iterator SampleMap::findSampleContainingPresentationTime(const MediaTime& time)
+PresentationOrderSampleMap::iterator PresentationOrderSampleMap::findSampleWithPresentationTime(const MediaTime& time)
{
- return std::equal_range(presentationBegin(), presentationEnd(), time, SampleIsLessThanMediaTimeComparator()).first;
+ auto range = m_samples.equal_range(time);
+ if (range.first == range.second)
+ return end();
+ return range.first;
+}
+
+PresentationOrderSampleMap::iterator PresentationOrderSampleMap::findSampleContainingPresentationTime(const MediaTime& time)
+{
+ // upper_bound will return the first sample whose presentation start time is greater than the search time.
+ // If this is the first sample, that means no sample in the map contains the requested time.
+ auto iter = m_samples.upper_bound(time);
+ if (iter == begin())
+ return end();
+
+ // Look at the previous sample; does it contain the requested time?
+ --iter;
+ MediaSample& sample = *iter->second;
+ if (sample.presentationTime() + sample.duration() > time)
+ return iter;
+ return end();
}
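
findSampleContainingPresentationTime leans on std::map::upper_bound: the first sample starting strictly after the search time can never contain it, so only the sample immediately before that position needs a duration check. A minimal sketch of that lookup over a start-time-to-duration map (doubles standing in for MediaTime):

    #include <cassert>
    #include <map>

    // Hypothetical sketch: presentation start -> duration.
    using SampleStarts = std::map<double, double>;

    SampleStarts::iterator findContaining(SampleStarts& samples, double time)
    {
        // First sample starting strictly after `time`; if that is the first
        // sample overall, nothing can contain `time`.
        auto iter = samples.upper_bound(time);
        if (iter == samples.begin())
            return samples.end();
        --iter; // candidate: the last sample starting at or before `time`
        return (iter->first + iter->second > time) ? iter : samples.end();
    }

    int main()
    {
        SampleStarts samples { { 0.0, 1.0 }, { 1.0, 1.0 }, { 5.0, 1.0 } };
        assert(findContaining(samples, 1.5)->first == 1.0);    // inside [1, 2)
        assert(findContaining(samples, 3.0) == samples.end()); // gap between 2 and 5
    }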
-SampleMap::iterator SampleMap::findSampleAfterPresentationTime(const MediaTime& time)
+PresentationOrderSampleMap::iterator PresentationOrderSampleMap::findSampleStartingOnOrAfterPresentationTime(const MediaTime& time)
{
- return std::lower_bound(presentationBegin(), presentationEnd(), time, SampleIsLessThanMediaTimeComparator());
+ return m_samples.lower_bound(time);
}
-SampleMap::iterator SampleMap::findSampleWithDecodeTime(const MediaTime& time)
+DecodeOrderSampleMap::iterator DecodeOrderSampleMap::findSampleWithDecodeKey(const KeyType& key)
{
- return m_decodeSamples.find(time);
+ return m_samples.find(key);
}
-SampleMap::reverse_iterator SampleMap::reverseFindSampleContainingPresentationTime(const MediaTime& time)
+PresentationOrderSampleMap::reverse_iterator PresentationOrderSampleMap::reverseFindSampleContainingPresentationTime(const MediaTime& time)
{
- return std::equal_range(reversePresentationBegin(), reversePresentationEnd(), time, SampleIsGreaterThanMediaTimeComparator()).first;
+ auto range = std::equal_range(rbegin(), rend(), time, SampleIsGreaterThanMediaTimeComparator<MapType>());
+ if (range.first == range.second)
+ return rend();
+ return range.first;
}
-SampleMap::reverse_iterator SampleMap::reverseFindSampleBeforePresentationTime(const MediaTime& time)
+PresentationOrderSampleMap::reverse_iterator PresentationOrderSampleMap::reverseFindSampleBeforePresentationTime(const MediaTime& time)
{
- return std::lower_bound(reversePresentationBegin(), reversePresentationEnd(), time, SampleIsGreaterThanMediaTimeComparator());
+ if (m_samples.empty())
+ return rend();
+
+ // upper_bound will return the first sample whose presentation start time is greater than the search time.
+ auto found = m_samples.upper_bound(time);
+
+ // If no sample was found with a time greater than the search time, return the last sample.
+ if (found == end())
+ return rbegin();
+
+ // If the first sample has a time greater than the search time, no samples will have a presentation time before the search time.
+ if (found == begin())
+ return rend();
+
+ // Otherwise, return the sample immediately previous to the one found.
+ return --reverse_iterator(--found);
}
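
The closing `--reverse_iterator(--found)` works because a reverse iterator dereferences to the element just before its base iterator, and decrementing a reverse iterator moves its base forward again. A small demonstration of that relationship:

    #include <cassert>
    #include <map>

    int main()
    {
        std::map<int, char> m { { 1, 'a' }, { 2, 'b' }, { 3, 'c' } };

        // A reverse_iterator built from a forward iterator dereferences to the
        // element immediately before that iterator's position.
        auto found = m.find(3);
        std::map<int, char>::reverse_iterator rev(found);
        assert(rev->first == 2);

        // Decrementing the reverse_iterator moves it toward the map's end.
        --rev;
        assert(rev->first == 3);
    }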
-SampleMap::reverse_iterator SampleMap::reverseFindSampleWithDecodeTime(const MediaTime& time)
+DecodeOrderSampleMap::reverse_iterator DecodeOrderSampleMap::reverseFindSampleWithDecodeKey(const KeyType& key)
{
- SampleMap::iterator found = findSampleWithDecodeTime(time);
- if (found == decodeEnd())
- return reverseDecodeEnd();
+ DecodeOrderSampleMap::iterator found = findSampleWithDecodeKey(key);
+ if (found == end())
+ return rend();
return --reverse_iterator(found);
}
-SampleMap::reverse_iterator SampleMap::findSyncSamplePriorToPresentationTime(const MediaTime& time, const MediaTime& threshold)
+DecodeOrderSampleMap::reverse_iterator DecodeOrderSampleMap::findSyncSamplePriorToPresentationTime(const MediaTime& time, const MediaTime& threshold)
{
- reverse_iterator reverseCurrentSamplePTS = reverseFindSampleBeforePresentationTime(time);
- if (reverseCurrentSamplePTS == reversePresentationEnd())
- return reverseDecodeEnd();
+ PresentationOrderSampleMap::reverse_iterator reverseCurrentSamplePTS = m_presentationOrder.reverseFindSampleBeforePresentationTime(time);
+ if (reverseCurrentSamplePTS == m_presentationOrder.rend())
+ return rend();
- reverse_iterator reverseCurrentSampleDTS = reverseFindSampleWithDecodeTime(reverseCurrentSamplePTS->second->decodeTime());
+ const RefPtr<MediaSample>& sample = reverseCurrentSamplePTS->second;
+ reverse_iterator reverseCurrentSampleDTS = reverseFindSampleWithDecodeKey(KeyType(sample->decodeTime(), sample->presentationTime()));
reverse_iterator foundSample = findSyncSamplePriorToDecodeIterator(reverseCurrentSampleDTS);
- if (foundSample == reverseDecodeEnd())
- return reverseDecodeEnd();
+ if (foundSample == rend())
+ return rend();
if (foundSample->second->presentationTime() < time - threshold)
- return reverseDecodeEnd();
+ return rend();
return foundSample;
}
-SampleMap::reverse_iterator SampleMap::findSyncSamplePriorToDecodeIterator(reverse_iterator iterator)
+DecodeOrderSampleMap::reverse_iterator DecodeOrderSampleMap::findSyncSamplePriorToDecodeIterator(reverse_iterator iterator)
{
- return std::find_if(iterator, reverseDecodeEnd(), SampleIsRandomAccess());
+ return std::find_if(iterator, rend(), SampleIsRandomAccess());
}
-SampleMap::iterator SampleMap::findSyncSampleAfterPresentationTime(const MediaTime& time, const MediaTime& threshold)
+DecodeOrderSampleMap::iterator DecodeOrderSampleMap::findSyncSampleAfterPresentationTime(const MediaTime& time, const MediaTime& threshold)
{
- iterator currentSamplePTS = findSampleAfterPresentationTime(time);
- if (currentSamplePTS == presentationEnd())
- return decodeEnd();
+ PresentationOrderSampleMap::iterator currentSamplePTS = m_presentationOrder.findSampleStartingOnOrAfterPresentationTime(time);
+ if (currentSamplePTS == m_presentationOrder.end())
+ return end();
- iterator currentSampleDTS = findSampleWithDecodeTime(currentSamplePTS->second->decodeTime());
+ const RefPtr<MediaSample>& sample = currentSamplePTS->second;
+ iterator currentSampleDTS = findSampleWithDecodeKey(KeyType(sample->decodeTime(), sample->presentationTime()));
MediaTime upperBound = time + threshold;
- iterator foundSample = std::find_if(currentSampleDTS, decodeEnd(), SampleIsRandomAccess());
- if (foundSample == decodeEnd())
- return decodeEnd();
+ iterator foundSample = std::find_if(currentSampleDTS, end(), SampleIsRandomAccess());
+ if (foundSample == end())
+ return end();
if (foundSample->second->presentationTime() > upperBound)
- return decodeEnd();
+ return end();
return foundSample;
}
-SampleMap::iterator SampleMap::findSyncSampleAfterDecodeIterator(iterator currentSampleDTS)
+DecodeOrderSampleMap::iterator DecodeOrderSampleMap::findSyncSampleAfterDecodeIterator(iterator currentSampleDTS)
{
- return std::find_if(currentSampleDTS, decodeEnd(), SampleIsRandomAccess());
+ if (currentSampleDTS == end())
+ return end();
+ return std::find_if(++currentSampleDTS, end(), SampleIsRandomAccess());
}
-SampleMap::iterator_range SampleMap::findSamplesBetweenPresentationTimes(const MediaTime& begin, const MediaTime& end)
+PresentationOrderSampleMap::iterator_range PresentationOrderSampleMap::findSamplesBetweenPresentationTimes(const MediaTime& beginTime, const MediaTime& endTime)
{
- std::pair<MediaTime, MediaTime> range(begin, end);
- return std::equal_range(presentationBegin(), presentationEnd(), range, SamplePresentationTimeIsWithinRangeComparator());
+ // beginTime is inclusive, so use lower_bound to include samples which start exactly at beginTime.
+ // endTime is not inclusive, so use lower_bound to exclude samples which start exactly at endTime.
+ auto lower_bound = m_samples.lower_bound(beginTime);
+ auto upper_bound = m_samples.lower_bound(endTime);
+ if (lower_bound == upper_bound)
+ return { end(), end() };
+ return { lower_bound, upper_bound };
+}
+
+PresentationOrderSampleMap::iterator_range PresentationOrderSampleMap::findSamplesWithinPresentationRange(const MediaTime& beginTime, const MediaTime& endTime)
+{
+ // beginTime is not inclusive, so use upper_bound to exclude samples which start exactly at beginTime.
+ // endTime is inclusive, so use upper_bound to include samples which start exactly at endTime.
+ auto lower_bound = m_samples.upper_bound(beginTime);
+ auto upper_bound = m_samples.upper_bound(endTime);
+ if (lower_bound == upper_bound)
+ return { end(), end() };
+ return { lower_bound, upper_bound };
+}
+
+PresentationOrderSampleMap::iterator_range PresentationOrderSampleMap::findSamplesWithinPresentationRangeFromEnd(const MediaTime& beginTime, const MediaTime& endTime)
+{
+ reverse_iterator rangeEnd = std::find_if(rbegin(), rend(), [&beginTime](auto& value) {
+ return value.second->presentationTime() <= beginTime;
+ });
+
+ reverse_iterator rangeStart = std::find_if(rbegin(), rangeEnd, [&endTime](auto& value) {
+ return value.second->presentationTime() <= endTime;
+ });
+
+ return iterator_range(rangeEnd.base(), rangeStart.base());
}
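
All three range helpers get their interval endpoints from the choice between lower_bound and upper_bound: lower_bound keeps a key equal to the bound on that side, upper_bound skips it. A short sketch of how the two choices flip inclusivity over a start-time map (doubles standing in for MediaTime):

    #include <cassert>
    #include <iterator>
    #include <map>

    int main()
    {
        // Keys are presentation start times.
        std::map<double, char> samples { { 0.0, 'a' }, { 1.0, 'b' }, { 2.0, 'c' } };

        // [begin, end): lower_bound on both bounds keeps a sample starting
        // exactly at beginTime and drops one starting exactly at endTime.
        auto first = samples.lower_bound(1.0);
        auto last = samples.lower_bound(2.0);
        assert(first->first == 1.0 && std::distance(first, last) == 1);

        // (begin, end]: upper_bound on both bounds flips the inclusivity.
        first = samples.upper_bound(1.0);
        last = samples.upper_bound(2.0);
        assert(first->first == 2.0 && std::distance(first, last) == 1);
    }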
-SampleMap::reverse_iterator_range SampleMap::findDependentSamples(MediaSample* sample)
+DecodeOrderSampleMap::reverse_iterator_range DecodeOrderSampleMap::findDependentSamples(MediaSample* sample)
{
ASSERT(sample);
- reverse_iterator currentDecodeIter = reverseFindSampleWithDecodeTime(sample->decodeTime());
+ reverse_iterator currentDecodeIter = reverseFindSampleWithDecodeKey(KeyType(sample->decodeTime(), sample->presentationTime()));
reverse_iterator nextSyncSample = findSyncSamplePriorToDecodeIterator(currentDecodeIter);
return reverse_iterator_range(currentDecodeIter, nextSyncSample);
}
diff --git a/Source/WebCore/Modules/mediasource/SampleMap.h b/Source/WebCore/Modules/mediasource/SampleMap.h
index deb42c453..639797a2b 100644
--- a/Source/WebCore/Modules/mediasource/SampleMap.h
+++ b/Source/WebCore/Modules/mediasource/SampleMap.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SampleMap_h
-#define SampleMap_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
@@ -35,48 +34,103 @@
namespace WebCore {
class MediaSample;
+class SampleMap;
-class SampleMap {
+class PresentationOrderSampleMap {
+ friend class SampleMap;
public:
typedef std::map<MediaTime, RefPtr<MediaSample>> MapType;
typedef MapType::iterator iterator;
+ typedef MapType::const_iterator const_iterator;
typedef MapType::reverse_iterator reverse_iterator;
+ typedef MapType::const_reverse_iterator const_reverse_iterator;
typedef std::pair<iterator, iterator> iterator_range;
+
+ iterator begin() { return m_samples.begin(); }
+ const_iterator begin() const { return m_samples.begin(); }
+ iterator end() { return m_samples.end(); }
+ const_iterator end() const { return m_samples.end(); }
+ reverse_iterator rbegin() { return m_samples.rbegin(); }
+ const_reverse_iterator rbegin() const { return m_samples.rbegin(); }
+ reverse_iterator rend() { return m_samples.rend(); }
+ const_reverse_iterator rend() const { return m_samples.rend(); }
+
+ WEBCORE_EXPORT iterator findSampleWithPresentationTime(const MediaTime&);
+ WEBCORE_EXPORT iterator findSampleContainingPresentationTime(const MediaTime&);
+ WEBCORE_EXPORT iterator findSampleStartingOnOrAfterPresentationTime(const MediaTime&);
+ WEBCORE_EXPORT reverse_iterator reverseFindSampleContainingPresentationTime(const MediaTime&);
+ WEBCORE_EXPORT reverse_iterator reverseFindSampleBeforePresentationTime(const MediaTime&);
+ WEBCORE_EXPORT iterator_range findSamplesBetweenPresentationTimes(const MediaTime&, const MediaTime&);
+ WEBCORE_EXPORT iterator_range findSamplesWithinPresentationRange(const MediaTime&, const MediaTime&);
+ WEBCORE_EXPORT iterator_range findSamplesWithinPresentationRangeFromEnd(const MediaTime&, const MediaTime&);
+
+private:
+ MapType m_samples;
+};
+
+class DecodeOrderSampleMap {
+ friend class SampleMap;
+public:
+ typedef std::pair<MediaTime, MediaTime> KeyType;
+ typedef std::map<KeyType, RefPtr<MediaSample>> MapType;
+ typedef MapType::iterator iterator;
+ typedef MapType::const_iterator const_iterator;
+ typedef MapType::reverse_iterator reverse_iterator;
+ typedef MapType::const_reverse_iterator const_reverse_iterator;
typedef std::pair<reverse_iterator, reverse_iterator> reverse_iterator_range;
- void addSample(PassRefPtr<MediaSample>);
- void removeSample(MediaSample*);
-
- iterator presentationBegin() { return m_presentationSamples.begin(); }
- iterator presentationEnd() { return m_presentationSamples.end(); }
- iterator decodeBegin() { return m_decodeSamples.begin(); }
- iterator decodeEnd() { return m_decodeSamples.end(); }
- reverse_iterator reversePresentationBegin() { return m_presentationSamples.rbegin(); }
- reverse_iterator reversePresentationEnd() { return m_presentationSamples.rend(); }
- reverse_iterator reverseDecodeBegin() { return m_decodeSamples.rbegin(); }
- reverse_iterator reverseDecodeEnd() { return m_decodeSamples.rend(); }
-
- iterator findSampleContainingPresentationTime(const MediaTime&);
- iterator findSampleAfterPresentationTime(const MediaTime&);
- iterator findSampleWithDecodeTime(const MediaTime&);
- reverse_iterator reverseFindSampleContainingPresentationTime(const MediaTime&);
- reverse_iterator reverseFindSampleBeforePresentationTime(const MediaTime&);
- reverse_iterator reverseFindSampleWithDecodeTime(const MediaTime&);
- reverse_iterator findSyncSamplePriorToPresentationTime(const MediaTime&, const MediaTime& threshold = MediaTime::positiveInfiniteTime());
- reverse_iterator findSyncSamplePriorToDecodeIterator(reverse_iterator);
- iterator findSyncSampleAfterPresentationTime(const MediaTime&, const MediaTime& threshold = MediaTime::positiveInfiniteTime());
- iterator findSyncSampleAfterDecodeIterator(iterator);
-
- iterator_range findSamplesBetweenPresentationTimes(const MediaTime&, const MediaTime&);
- reverse_iterator_range findDependentSamples(MediaSample*);
-
+ iterator begin() { return m_samples.begin(); }
+ const_iterator begin() const { return m_samples.begin(); }
+ iterator end() { return m_samples.end(); }
+ const_iterator end() const { return m_samples.end(); }
+ reverse_iterator rbegin() { return m_samples.rbegin(); }
+ const_reverse_iterator rbegin() const { return m_samples.rbegin(); }
+ reverse_iterator rend() { return m_samples.rend(); }
+ const_reverse_iterator rend() const { return m_samples.rend(); }
+
+ WEBCORE_EXPORT iterator findSampleWithDecodeKey(const KeyType&);
+ WEBCORE_EXPORT reverse_iterator reverseFindSampleWithDecodeKey(const KeyType&);
+ WEBCORE_EXPORT reverse_iterator findSyncSamplePriorToPresentationTime(const MediaTime&, const MediaTime& threshold = MediaTime::positiveInfiniteTime());
+ WEBCORE_EXPORT reverse_iterator findSyncSamplePriorToDecodeIterator(reverse_iterator);
+ WEBCORE_EXPORT iterator findSyncSampleAfterPresentationTime(const MediaTime&, const MediaTime& threshold = MediaTime::positiveInfiniteTime());
+ WEBCORE_EXPORT iterator findSyncSampleAfterDecodeIterator(iterator);
+ WEBCORE_EXPORT reverse_iterator_range findDependentSamples(MediaSample*);
+
+private:
+ MapType m_samples;
+ PresentationOrderSampleMap m_presentationOrder;
+};
+
+class SampleMap {
+public:
+ SampleMap() = default;
+
+ WEBCORE_EXPORT bool empty() const;
+ WEBCORE_EXPORT void clear();
+ WEBCORE_EXPORT void addSample(MediaSample&);
+ WEBCORE_EXPORT void removeSample(MediaSample*);
+ size_t sizeInBytes() const { return m_totalSize; }
+
+ template<typename I>
+ void addRange(I begin, I end);
+
+ DecodeOrderSampleMap& decodeOrder() { return m_decodeOrder; }
+ const DecodeOrderSampleMap& decodeOrder() const { return m_decodeOrder; }
+ PresentationOrderSampleMap& presentationOrder() { return m_decodeOrder.m_presentationOrder; }
+ const PresentationOrderSampleMap& presentationOrder() const { return m_decodeOrder.m_presentationOrder; }
+
private:
- MapType m_presentationSamples;
- MapType m_decodeSamples;
+ DecodeOrderSampleMap m_decodeOrder;
+ size_t m_totalSize { 0 };
};
+template<typename I>
+inline void SampleMap::addRange(I begin, I end)
+{
+ for (I iter = begin; iter != end; ++iter)
+ addSample(*iter->second);
}
-#endif
+} // namespace WebCore
-#endif // SampleMap_h
+#endif // ENABLE(MEDIA_SOURCE)
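
The header splits SampleMap into a DecodeOrderSampleMap and a nested PresentationOrderSampleMap because frame reordering (B-frames) makes decode order and presentation order disagree, so each sample must be findable in both. A heavily simplified, hypothetical sketch of the same two-index design, including the composite decode key that keeps samples sharing a decode timestamp distinct:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <utility>

    // Hypothetical stand-in for MediaSample.
    struct Sample {
        double decodeTime;
        double presentationTime;
        size_t sizeInBytes;
    };

    class TwoIndexSampleMap {
    public:
        void add(std::shared_ptr<Sample> sample)
        {
            m_byPresentation.emplace(sample->presentationTime, sample);
            // The decode key pairs decode time with presentation time, so two
            // samples sharing a decode timestamp still get distinct keys.
            m_byDecode.emplace(std::make_pair(sample->decodeTime, sample->presentationTime), sample);
            m_totalSize += sample->sizeInBytes;
        }

        size_t totalSize() const { return m_totalSize; }

    private:
        std::map<double, std::shared_ptr<Sample>> m_byPresentation;
        std::map<std::pair<double, double>, std::shared_ptr<Sample>> m_byDecode;
        size_t m_totalSize { 0 };
    };

    int main()
    {
        TwoIndexSampleMap map;
        // A B-frame pattern: the frame presented second is decoded third.
        map.add(std::make_shared<Sample>(Sample { 0.0, 0.0, 100 }));
        map.add(std::make_shared<Sample>(Sample { 1.0, 2.0, 100 })); // P-frame, presented later
        map.add(std::make_shared<Sample>(Sample { 2.0, 1.0, 100 })); // B-frame, presented earlier
        assert(map.totalSize() == 300);
    }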
diff --git a/Source/WebCore/Modules/mediasource/SourceBuffer.cpp b/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
index e642179f3..2b4ac5854 100644
--- a/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
+++ b/Source/WebCore/Modules/mediasource/SourceBuffer.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -34,8 +35,10 @@
#if ENABLE(MEDIA_SOURCE)
#include "AudioTrackList.h"
+#include "BufferSource.h"
#include "Event.h"
-#include "ExceptionCodePlaceholder.h"
+#include "EventNames.h"
+#include "ExceptionCode.h"
#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "InbandTextTrack.h"
@@ -44,58 +47,71 @@
#include "MediaSample.h"
#include "MediaSource.h"
#include "SampleMap.h"
+#include "SourceBufferList.h"
#include "SourceBufferPrivate.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
#include "VideoTrackList.h"
+#include <limits>
#include <map>
+#include <runtime/JSCInlines.h>
+#include <runtime/JSLock.h>
+#include <runtime/VM.h>
+#include <wtf/CurrentTime.h>
#include <wtf/NeverDestroyed.h>
namespace WebCore {
+static const double ExponentialMovingAverageCoefficient = 0.1;
+
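
ExponentialMovingAverageCoefficient gives each new observation a 10% weight when smoothing a measured quantity. Assuming the conventional EMA update rule (the exact expression this constant feeds appears later in the file, outside this hunk), the recurrence is:

    #include <cassert>

    static const double ExponentialMovingAverageCoefficient = 0.1;

    // Standard exponential moving average update: the newest observation gets
    // weight `coefficient`, the accumulated history gets the rest.
    double updateAverage(double average, double observation)
    {
        return average + ExponentialMovingAverageCoefficient * (observation - average);
    }

    int main()
    {
        double average = 0;
        average = updateAverage(average, 100); // 10
        average = updateAverage(average, 100); // 19
        assert(average > 18.9 && average < 19.1);
    }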
struct SourceBuffer::TrackBuffer {
MediaTime lastDecodeTimestamp;
MediaTime lastFrameDuration;
MediaTime highestPresentationTimestamp;
MediaTime lastEnqueuedPresentationTime;
- bool needRandomAccessFlag;
- bool enabled;
+ MediaTime lastEnqueuedDecodeEndTime;
+ bool needRandomAccessFlag { true };
+ bool enabled { false };
+ bool needsReenqueueing { false };
SampleMap samples;
- SampleMap::MapType decodeQueue;
+ DecodeOrderSampleMap::MapType decodeQueue;
RefPtr<MediaDescription> description;
+ PlatformTimeRanges buffered;
TrackBuffer()
: lastDecodeTimestamp(MediaTime::invalidTime())
, lastFrameDuration(MediaTime::invalidTime())
, highestPresentationTimestamp(MediaTime::invalidTime())
, lastEnqueuedPresentationTime(MediaTime::invalidTime())
- , needRandomAccessFlag(true)
- , enabled(false)
+ , lastEnqueuedDecodeEndTime(MediaTime::invalidTime())
{
}
};
-PassRef<SourceBuffer> SourceBuffer::create(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
+Ref<SourceBuffer> SourceBuffer::create(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
{
- RefPtr<SourceBuffer> sourceBuffer(adoptRef(new SourceBuffer(std::move(sourceBufferPrivate), source)));
+ auto sourceBuffer = adoptRef(*new SourceBuffer(WTFMove(sourceBufferPrivate), source));
sourceBuffer->suspendIfNeeded();
- return sourceBuffer.releaseNonNull();
+ return sourceBuffer;
}
-SourceBuffer::SourceBuffer(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
+SourceBuffer::SourceBuffer(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
: ActiveDOMObject(source->scriptExecutionContext())
- , m_private(std::move(sourceBufferPrivate))
+ , m_private(WTFMove(sourceBufferPrivate))
, m_source(source)
, m_asyncEventQueue(*this)
- , m_updating(false)
- , m_appendBufferTimer(this, &SourceBuffer::appendBufferTimerFired)
- , m_highestPresentationEndTimestamp(MediaTime::invalidTime())
- , m_receivedFirstInitializationSegment(false)
+ , m_appendBufferTimer(*this, &SourceBuffer::appendBufferTimerFired)
+ , m_appendWindowStart(MediaTime::zeroTime())
+ , m_appendWindowEnd(MediaTime::positiveInfiniteTime())
+ , m_groupStartTimestamp(MediaTime::invalidTime())
+ , m_groupEndTimestamp(MediaTime::zeroTime())
, m_buffered(TimeRanges::create())
- , m_active(false)
, m_appendState(WaitingForSegment)
+ , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
+ , m_pendingRemoveStart(MediaTime::invalidTime())
+ , m_pendingRemoveEnd(MediaTime::invalidTime())
+ , m_removeTimer(*this, &SourceBuffer::removeTimerFired)
{
- ASSERT(m_private);
ASSERT(m_source);
m_private->setClient(this);
@@ -105,50 +121,37 @@ SourceBuffer::~SourceBuffer()
{
ASSERT(isRemoved());
- m_private->setClient(0);
+ m_private->setClient(nullptr);
}
-PassRefPtr<TimeRanges> SourceBuffer::buffered(ExceptionCode& ec) const
+ExceptionOr<Ref<TimeRanges>> SourceBuffer::buffered() const
{
// Section 3.1 buffered attribute steps.
// https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
// 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
// INVALID_STATE_ERR exception and abort these steps.
- if (isRemoved()) {
- ec = INVALID_STATE_ERR;
- return nullptr;
- }
+ if (isRemoved())
+ return Exception { INVALID_STATE_ERR };
// 2. Return a new static normalized TimeRanges object for the media segments buffered.
return m_buffered->copy();
}
-const RefPtr<TimeRanges>& SourceBuffer::buffered() const
-{
- return m_buffered;
-}
-
double SourceBuffer::timestampOffset() const
{
return m_timestampOffset.toDouble();
}
-void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::setTimestampOffset(double offset)
{
// Section 3.1 timestampOffset attribute setter steps.
// https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
- // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
+ // 1. Let new timestamp offset equal the new value being assigned to this attribute.
+ // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
// INVALID_STATE_ERR exception and abort these steps.
- if (isRemoved()) {
- ec = INVALID_STATE_ERR;
- return;
- }
-
// 3. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
- if (m_updating) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
// 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
// 4.1 Set the readyState attribute of the parent media source to "open"
@@ -156,155 +159,277 @@ void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
m_source->openIfInEndedState();
// 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
- if (m_appendState == ParsingMediaSegment) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (m_appendState == ParsingMediaSegment)
+ return Exception { INVALID_STATE_ERR };
+
+ MediaTime newTimestampOffset = MediaTime::createWithDouble(offset);
+
+ // 6. If the mode attribute equals "sequence", then set the group start timestamp to new timestamp offset.
+ if (m_mode == AppendMode::Sequence)
+ m_groupStartTimestamp = newTimestampOffset;
- // 6. Update the attribute to the new value.
- m_timestampOffset = MediaTime::createWithDouble(offset);
+ // 7. Update the attribute to the new value.
+ m_timestampOffset = newTimestampOffset;
+
+ return { };
}
-void SourceBuffer::appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode& ec)
+double SourceBuffer::appendWindowStart() const
{
- // Section 3.2 appendBuffer()
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
- // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
- if (!data) {
- ec = INVALID_ACCESS_ERR;
- return;
- }
+ return m_appendWindowStart.toDouble();
+}
- appendBufferInternal(static_cast<unsigned char*>(data->data()), data->byteLength(), ec);
+ExceptionOr<void> SourceBuffer::setAppendWindowStart(double newValue)
+{
+ // Section 3.1 appendWindowStart attribute setter steps.
+ // W3C Editor's Draft 16 September 2016
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowstart
+ // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
+ // then throw an InvalidStateError exception and abort these steps.
+ // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
+
+ // 3. If the new value is less than 0 or greater than or equal to appendWindowEnd then
+ // throw an TypeError exception and abort these steps.
+ if (newValue < 0 || newValue >= m_appendWindowEnd.toDouble())
+ return Exception { TypeError };
+
+ // 4. Update the attribute to the new value.
+ m_appendWindowStart = MediaTime::createWithDouble(newValue);
+
+ return { };
}
-void SourceBuffer::appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode& ec)
+double SourceBuffer::appendWindowEnd() const
{
- // Section 3.2 appendBuffer()
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
- // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
- if (!data) {
- ec = INVALID_ACCESS_ERR;
- return;
+ return m_appendWindowEnd.toDouble();
+}
+
+ExceptionOr<void> SourceBuffer::setAppendWindowEnd(double newValue)
+{
+ // Section 3.1 appendWindowEnd attribute setter steps.
+ // W3C Editor's Draft 16 September 2016
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowend
+ // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
+ // then throw an InvalidStateError exception and abort these steps.
+ // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
+
+ // 3. If the new value equals NaN, then throw a TypeError and abort these steps.
+ // 4. If the new value is less than or equal to appendWindowStart then throw a TypeError exception
+ // and abort these steps.
+ if (std::isnan(newValue) || newValue <= m_appendWindowStart.toDouble())
+ return Exception { TypeError };
+
+ // 5. Update the attribute to the new value.
+ m_appendWindowEnd = MediaTime::createWithDouble(newValue);
+
+ return { };
+}
+
+ExceptionOr<void> SourceBuffer::appendBuffer(const BufferSource& data)
+{
+ return appendBufferInternal(static_cast<const unsigned char*>(data.data()), data.length());
+}
+
+void SourceBuffer::resetParserState()
+{
+ // Section 3.5.2 Reset Parser State algorithm steps.
+ // http://www.w3.org/TR/2014/CR-media-source-20140717/#sourcebuffer-reset-parser-state
+ // 1. If the append state equals PARSING_MEDIA_SEGMENT and the input buffer contains some complete coded frames,
+ // then run the coded frame processing algorithm until all of these complete coded frames have been processed.
+ // FIXME: If any implementation works in pulling mode (instead of pushing asynchronously to
+ // SourceBufferPrivate and forgetting), this should be handled somehow, either here or in m_private->abort().
+
+ // 2. Unset the last decode timestamp on all track buffers.
+ // 3. Unset the last frame duration on all track buffers.
+ // 4. Unset the highest presentation timestamp on all track buffers.
+ // 5. Set the need random access point flag on all track buffers to true.
+ for (auto& trackBufferPair : m_trackBufferMap.values()) {
+ trackBufferPair.lastDecodeTimestamp = MediaTime::invalidTime();
+ trackBufferPair.lastFrameDuration = MediaTime::invalidTime();
+ trackBufferPair.highestPresentationTimestamp = MediaTime::invalidTime();
+ trackBufferPair.needRandomAccessFlag = true;
}
+ // 6. Remove all bytes from the input buffer.
+ // Note: this is handled by abortIfUpdating()
+ // 7. Set append state to WAITING_FOR_SEGMENT.
+ m_appendState = WaitingForSegment;
- appendBufferInternal(static_cast<unsigned char*>(data->baseAddress()), data->byteLength(), ec);
+ m_private->resetParserState();
}
-void SourceBuffer::abort(ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::abort()
{
// Section 3.2 abort() method steps.
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
// 1. If this object has been removed from the sourceBuffers attribute of the parent media source
// then throw an INVALID_STATE_ERR exception and abort these steps.
// 2. If the readyState attribute of the parent media source is not in the "open" state
// then throw an INVALID_STATE_ERR exception and abort these steps.
- if (isRemoved() || !m_source->isOpen()) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (isRemoved() || !m_source->isOpen())
+ return Exception { INVALID_STATE_ERR };
- // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
+ // 3. If the range removal algorithm is running, then throw an InvalidStateError exception and abort these steps.
+ if (m_removeTimer.isActive())
+ return Exception { INVALID_STATE_ERR };
+
+ // 4. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
abortIfUpdating();
- // 4. Run the reset parser state algorithm.
- m_private->abort();
+ // 5. Run the reset parser state algorithm.
+ resetParserState();
+
+ // 6. Set appendWindowStart to the presentation start time.
+ m_appendWindowStart = MediaTime::zeroTime();
+
+ // 7. Set appendWindowEnd to positive Infinity.
+ m_appendWindowEnd = MediaTime::positiveInfiniteTime();
+
+ return { };
+}
+
+ExceptionOr<void> SourceBuffer::remove(double start, double end)
+{
+ return remove(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end));
+}
+
+ExceptionOr<void> SourceBuffer::remove(const MediaTime& start, const MediaTime& end)
+{
+ LOG(MediaSource, "SourceBuffer::remove(%p) - start(%lf), end(%lf)", this, start.toDouble(), end.toDouble());
+
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-remove
+ // Section 3.2 remove() method steps.
+ // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw
+ // an InvalidStateError exception and abort these steps.
+ // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
+
+ // 3. If duration equals NaN, then throw a TypeError exception and abort these steps.
+ // 4. If start is negative or greater than duration, then throw a TypeError exception and abort these steps.
+ // 5. If end is less than or equal to start or end equals NaN, then throw a TypeError exception and abort these steps.
+ if (m_source->duration().isInvalid()
+ || end.isInvalid()
+ || start.isInvalid()
+ || start < MediaTime::zeroTime()
+ || start > m_source->duration()
+ || end <= start) {
+ return Exception { TypeError };
+ }
- // FIXME(229408) Add steps 5-6 update appendWindowStart & appendWindowEnd.
+ // 6. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
+ // 6.1. Set the readyState attribute of the parent media source to "open"
+ // 6.2. Queue a task to fire a simple event named sourceopen at the parent media source .
+ m_source->openIfInEndedState();
+
+ // 7. Run the range removal algorithm with start and end as the start and end of the removal range.
+ rangeRemoval(start, end);
+
+ return { };
}
+void SourceBuffer::rangeRemoval(const MediaTime& start, const MediaTime& end)
+{
+ // 3.5.7 Range Removal
+ // https://rawgit.com/w3c/media-source/7bbe4aa33c61ec025bc7acbd80354110f6a000f9/media-source.html#sourcebuffer-range-removal
+ // 1. Let start equal the starting presentation timestamp for the removal range.
+ // 2. Let end equal the end presentation timestamp for the removal range.
+ // 3. Set the updating attribute to true.
+ m_updating = true;
+
+ // 4. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
+ scheduleEvent(eventNames().updatestartEvent);
+
+ // 5. Return control to the caller and run the rest of the steps asynchronously.
+ m_pendingRemoveStart = start;
+ m_pendingRemoveEnd = end;
+ m_removeTimer.startOneShot(0);
+}
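
rangeRemoval() performs only the synchronous half of the spec algorithm — set updating, queue updatestart, record the pending range — and arms a zero-delay timer so the removal itself runs on a later run-loop turn. A hypothetical sketch of that defer-to-timer shape, with a plain task queue standing in for WebCore's Timer and run loop:

    #include <cassert>
    #include <functional>
    #include <vector>

    // Stand-in for the run loop: tasks armed now, executed on a later turn.
    std::vector<std::function<void()>> runLoopTasks;

    struct Remover {
        bool updating { false };
        double pendingStart { -1 }, pendingEnd { -1 };

        void rangeRemoval(double start, double end)
        {
            updating = true; // observable immediately, like the updating attribute
            pendingStart = start;
            pendingEnd = end;
            runLoopTasks.push_back([this] { removeTimerFired(); }); // "startOneShot(0)"
        }

        void removeTimerFired()
        {
            // ... the actual coded-frame removal would happen here ...
            updating = false;
        }
    };

    int main()
    {
        Remover remover;
        remover.rangeRemoval(0.0, 10.0);
        assert(remover.updating); // synchronous part done, removal still pending
        for (auto& task : runLoopTasks)
            task(); // later run-loop turn
        assert(!remover.updating);
    }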
void SourceBuffer::abortIfUpdating()
{
- // Section 3.2 abort() method step 3 substeps.
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
+ // Section 3.2 abort() method step 4 substeps.
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
if (!m_updating)
return;
- // 3.1. Abort the buffer append and stream append loop algorithms if they are running.
+ // 4.1. Abort the buffer append algorithm if it is running.
m_appendBufferTimer.stop();
m_pendingAppendData.clear();
+ m_private->abort();
- // 3.2. Set the updating attribute to false.
+ // 4.2. Set the updating attribute to false.
m_updating = false;
- // 3.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
+ // 4.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
scheduleEvent(eventNames().abortEvent);
- // 3.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
+ // 4.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
scheduleEvent(eventNames().updateendEvent);
}
-void SourceBuffer::removedFromMediaSource()
+MediaTime SourceBuffer::highestPresentationTimestamp() const
{
- if (isRemoved())
- return;
-
- m_private->removedFromMediaSource();
- m_source = 0;
- m_asyncEventQueue.close();
+ MediaTime highestTime;
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
+ auto lastSampleIter = trackBuffer.samples.presentationOrder().rbegin();
+ if (lastSampleIter == trackBuffer.samples.presentationOrder().rend())
+ continue;
+ highestTime = std::max(highestTime, lastSampleIter->first);
+ }
+ return highestTime;
}
-void SourceBuffer::sourceBufferPrivateSeekToTime(SourceBufferPrivate*, const MediaTime& time)
+void SourceBuffer::readyStateChanged()
{
- LOG(Media, "SourceBuffer::sourceBufferPrivateSeekToTime(%p)", this);
-
- for (auto trackBufferIterator = m_trackBufferMap.begin(); trackBufferIterator != m_trackBufferMap.end(); ++trackBufferIterator) {
- TrackBuffer& trackBuffer = trackBufferIterator->value;
- AtomicString trackID = trackBufferIterator->key;
-
- // Find the sample which contains the current presentation time.
- auto currentSamplePTSIterator = trackBuffer.samples.findSampleContainingPresentationTime(time);
+ updateBufferedFromTrackBuffers();
+}
- if (currentSamplePTSIterator == trackBuffer.samples.presentationEnd()) {
- trackBuffer.decodeQueue.clear();
- m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
- continue;
- }
+void SourceBuffer::removedFromMediaSource()
+{
+ if (isRemoved())
+ return;
- // Seach backward for the previous sync sample.
- MediaTime currentSampleDecodeTime = currentSamplePTSIterator->second->decodeTime();
- auto currentSampleDTSIterator = trackBuffer.samples.findSampleWithDecodeTime(currentSampleDecodeTime);
- ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeEnd());
+ abortIfUpdating();
- auto reverseCurrentSampleIter = --SampleMap::reverse_iterator(currentSampleDTSIterator);
- auto reverseLastSyncSampleIter = trackBuffer.samples.findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
- if (reverseLastSyncSampleIter == trackBuffer.samples.reverseDecodeEnd()) {
- trackBuffer.decodeQueue.clear();
- m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
- continue;
- }
+ for (auto& trackBufferPair : m_trackBufferMap.values()) {
+ trackBufferPair.samples.clear();
+ trackBufferPair.decodeQueue.clear();
+ }
- Vector<RefPtr<MediaSample>> nonDisplayingSamples;
- for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
- nonDisplayingSamples.append(iter->second);
+ m_private->removedFromMediaSource();
+ m_source = nullptr;
+}
- m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);
+void SourceBuffer::seekToTime(const MediaTime& time)
+{
+ LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());
- // Fill the decode queue with the remaining samples.
- trackBuffer.decodeQueue.clear();
- for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeEnd(); ++iter)
- trackBuffer.decodeQueue.insert(*iter);
+ for (auto& trackBufferPair : m_trackBufferMap) {
+ TrackBuffer& trackBuffer = trackBufferPair.value;
+ const AtomicString& trackID = trackBufferPair.key;
- provideMediaData(trackBuffer, trackID);
+ trackBuffer.needsReenqueueing = true;
+ reenqueueMediaForTime(trackBuffer, trackID, time);
}
}
-MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
+MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
{
MediaTime seekTime = targetTime;
MediaTime lowerBoundTime = targetTime - negativeThreshold;
MediaTime upperBoundTime = targetTime + positiveThreshold;
- for (auto trackBufferIterator = m_trackBufferMap.begin(); trackBufferIterator != m_trackBufferMap.end(); ++trackBufferIterator) {
- TrackBuffer& trackBuffer = trackBufferIterator->value;
-
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
// Find the sample which contains the target time.
- auto futureSyncSampleIterator = trackBuffer.samples.findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
- auto pastSyncSampleIterator = trackBuffer.samples.findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
- auto upperBound = trackBuffer.samples.decodeEnd();
- auto lowerBound = trackBuffer.samples.reverseDecodeEnd();
+ auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
+ auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
+ auto upperBound = trackBuffer.samples.decodeOrder().end();
+ auto lowerBound = trackBuffer.samples.decodeOrder().rend();
if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
continue;
@@ -331,12 +456,23 @@ MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBuffer
bool SourceBuffer::hasPendingActivity() const
{
- return m_source;
+ return m_source || m_asyncEventQueue.hasPendingEvents();
}
void SourceBuffer::stop()
{
m_appendBufferTimer.stop();
+ m_removeTimer.stop();
+}
+
+bool SourceBuffer::canSuspendForDocumentSuspension() const
+{
+ return !hasPendingActivity();
+}
+
+const char* SourceBuffer::activeDOMObjectName() const
+{
+ return "SourceBuffer";
}
bool SourceBuffer::isRemoved() const
@@ -346,13 +482,13 @@ bool SourceBuffer::isRemoved() const
void SourceBuffer::scheduleEvent(const AtomicString& eventName)
{
- RefPtr<Event> event = Event::create(eventName, false, false);
+ auto event = Event::create(eventName, false, false);
event->setTarget(this);
- m_asyncEventQueue.enqueueEvent(event.release());
+ m_asyncEventQueue.enqueueEvent(WTFMove(event));
}
-void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, ExceptionCode& ec)
+ExceptionOr<void> SourceBuffer::appendBufferInternal(const unsigned char* data, unsigned size)
{
// Section 3.2 appendBuffer()
// https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
@@ -364,10 +500,8 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
// 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
// then throw an INVALID_STATE_ERR exception and abort these steps.
// 2. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
- if (isRemoved() || m_updating) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
// 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
// 3.1. Set the readyState attribute of the parent media source to "open"
@@ -375,13 +509,16 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
m_source->openIfInEndedState();
// 4. Run the coded frame eviction algorithm.
- m_private->evictCodedFrames();
+ evictCodedFrames(size);
+ // FIXME: enable this code when MSE libraries have been updated to support it.
+#if USE(GSTREAMER)
// 5. If the buffer full flag equals true, then throw a QUOTA_EXCEEDED_ERR exception and abort these steps.
- if (m_private->isFull()) {
- ec = QUOTA_EXCEEDED_ERR;
- return;
+ if (m_bufferFull) {
+ LOG(MediaSource, "SourceBuffer::appendBufferInternal(%p) - buffer full, failing with QUOTA_EXCEEDED_ERR error", this);
+ return Exception { QUOTA_EXCEEDED_ERR };
}
+#endif
// NOTE: Return to 3.2 appendBuffer()
// 3. Add data to the end of the input buffer.
@@ -395,10 +532,17 @@ void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, Exce
// 6. Asynchronously run the buffer append algorithm.
m_appendBufferTimer.startOneShot(0);
+
+ reportExtraMemoryAllocated();
+
+ return { };
}
-void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
+void SourceBuffer::appendBufferTimerFired()
{
+ if (isRemoved())
+ return;
+
ASSERT(m_updating);
// Section 3.5.5 Buffer Append Algorithm
@@ -417,31 +561,44 @@ void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
// https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
// When the segment parser loop algorithm is invoked, run the following steps:
- SourceBufferPrivate::AppendResult result = SourceBufferPrivate::AppendSucceeded;
- do {
- // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
- if (!m_pendingAppendData.size())
- break;
+ // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
+ if (!m_pendingAppendData.size()) {
+ sourceBufferPrivateAppendComplete(AppendSucceeded);
+ return;
+ }
- result = m_private->append(m_pendingAppendData.data(), appendSize);
- m_pendingAppendData.clear();
+ m_private->append(m_pendingAppendData.data(), appendSize);
+ m_pendingAppendData.clear();
+}
- // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
- // then run the end of stream algorithm with the error parameter set to "decode" and abort this algorithm.
- if (result == SourceBufferPrivate::ParsingFailed) {
- m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
- break;
- }
+void SourceBuffer::sourceBufferPrivateAppendComplete(AppendResult result)
+{
+ if (isRemoved())
+ return;
- // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
- // sourceBufferPrivateDidReceiveSample below.
+ // Resolve the changes in the TrackBuffers' buffered ranges
+ // into the SourceBuffer's buffered ranges.
+ updateBufferedFromTrackBuffers();
- // 7. Need more data: Return control to the calling algorithm.
- } while (0);
+ // Section 3.5.5 Buffer Append Algorithm, ctd.
+ // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append
+
+ // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
+ // then run the append error algorithm with the decode error parameter set to true and abort this algorithm.
+ if (result == ParsingFailed) {
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - result = ParsingFailed", this);
+ appendError(true);
+ return;
+ }
+
+ // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
+ // sourceBufferPrivateDidReceiveSample below.
+
+ // 7. Need more data: Return control to the calling algorithm.
// NOTE: return to Section 3.5.5
// 2.If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
- if (result != SourceBufferPrivate::AppendSucceeded)
+ if (result != AppendSucceeded)
return;
// 3. Set the updating attribute to false.
@@ -453,54 +610,364 @@ void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
// 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
scheduleEvent(eventNames().updateendEvent);
- m_source->monitorSourceBuffers();
- for (auto iter = m_trackBufferMap.begin(), end = m_trackBufferMap.end(); iter != end; ++iter)
- provideMediaData(iter->value, iter->key);
+ if (m_source)
+ m_source->monitorSourceBuffers();
+
+ MediaTime currentMediaTime = m_source->currentTime();
+ for (auto& trackBufferPair : m_trackBufferMap) {
+ TrackBuffer& trackBuffer = trackBufferPair.value;
+ const AtomicString& trackID = trackBufferPair.key;
+
+ if (trackBuffer.needsReenqueueing) {
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
+ reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
+ } else
+ provideMediaData(trackBuffer, trackID);
+ }
+
+ reportExtraMemoryAllocated();
+ if (extraMemoryCost() > this->maximumBufferSize())
+ m_bufferFull = true;
+
+ LOG(Media, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}
-const AtomicString& SourceBuffer::decodeError()
+void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(int error)
{
- static NeverDestroyed<AtomicString> decode("decode", AtomicString::ConstructFromLiteral);
- return decode;
+#if LOG_DISABLED
+ UNUSED_PARAM(error);
+#endif
+
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(%p) - result = %i", this, error);
+
+ if (!isRemoved())
+ m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
}
-const AtomicString& SourceBuffer::networkError()
+static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
{
- static NeverDestroyed<AtomicString> network("network", AtomicString::ConstructFromLiteral);
- return network;
+ return a.second->decodeTime() < b.second->decodeTime();
}
-VideoTrackList* SourceBuffer::videoTracks()
+static PlatformTimeRanges removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
{
- if (!m_source->mediaElement())
- return nullptr;
+#if !LOG_DISABLED
+ MediaTime earliestSample = MediaTime::positiveInfiniteTime();
+ MediaTime latestSample = MediaTime::zeroTime();
+ size_t bytesRemoved = 0;
+#else
+ UNUSED_PARAM(logPrefix);
+ UNUSED_PARAM(buffer);
+#endif
- if (!m_videoTracks)
- m_videoTracks = VideoTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+ PlatformTimeRanges erasedRanges;
+ for (auto sampleIt : samples) {
+ const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
+#if !LOG_DISABLED
+ size_t startBufferSize = trackBuffer.samples.sizeInBytes();
+#endif
+
+ RefPtr<MediaSample>& sample = sampleIt.second;
+ LOG(MediaSource, "SourceBuffer::%s(%p) - removing sample(%s)", logPrefix, buffer, toString(*sampleIt.second).utf8().data());
+
+ // Remove the erased samples from the TrackBuffer sample map.
+ trackBuffer.samples.removeSample(sample.get());
+
+ // Also remove the erased samples from the TrackBuffer decodeQueue.
+ trackBuffer.decodeQueue.erase(decodeKey);
+
+ auto startTime = sample->presentationTime();
+ auto endTime = startTime + sample->duration();
+ erasedRanges.add(startTime, endTime);
+
+#if !LOG_DISABLED
+ bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
+ if (startTime < earliestSample)
+ earliestSample = startTime;
+ if (endTime > latestSample)
+ latestSample = endTime;
+#endif
+ }
+
+ // Because we may have added artificial padding in the buffered ranges when adding samples, we may
+ // need to remove that padding when removing those same samples. Walk over the erased ranges looking
+ // for unbuffered areas and expand erasedRanges to encompass those areas.
+ PlatformTimeRanges additionalErasedRanges;
+ for (unsigned i = 0; i < erasedRanges.length(); ++i) {
+ auto erasedStart = erasedRanges.start(i);
+ auto erasedEnd = erasedRanges.end(i);
+ auto startIterator = trackBuffer.samples.presentationOrder().reverseFindSampleBeforePresentationTime(erasedStart);
+ if (startIterator == trackBuffer.samples.presentationOrder().rend())
+ additionalErasedRanges.add(MediaTime::zeroTime(), erasedStart);
+ else {
+ auto& previousSample = *startIterator->second;
+ if (previousSample.presentationTime() + previousSample.duration() < erasedStart)
+ additionalErasedRanges.add(previousSample.presentationTime() + previousSample.duration(), erasedStart);
+ }
+
+ auto endIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(erasedEnd);
+ if (endIterator == trackBuffer.samples.presentationOrder().end())
+ additionalErasedRanges.add(erasedEnd, MediaTime::positiveInfiniteTime());
+ else {
+ auto& nextSample = *endIterator->second;
+ if (nextSample.presentationTime() > erasedEnd)
+ additionalErasedRanges.add(erasedEnd, nextSample.presentationTime());
+ }
+ }
+ if (additionalErasedRanges.length())
+ erasedRanges.unionWith(additionalErasedRanges);
+
+#if !LOG_DISABLED
+ if (bytesRemoved)
+ LOG(MediaSource, "SourceBuffer::%s(%p) removed %zu bytes, start(%lf), end(%lf)", logPrefix, buffer, bytesRemoved, earliestSample.toDouble(), latestSample.toDouble());
+#endif
+
+ return erasedRanges;
+}
+
+void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
+{
+ LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());
+
+ // 3.5.9 Coded Frame Removal Algorithm
+ // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal
+
+ // 1. Let start be the starting presentation timestamp for the removal range.
+ MediaTime durationMediaTime = m_source->duration();
+ MediaTime currentMediaTime = m_source->currentTime();
+
+ // 2. Let end be the end presentation timestamp for the removal range.
+ // 3. For each track buffer in this source buffer, run the following steps:
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
+ // 3.1. Let remove end timestamp be the current value of duration
+ // 3.2 If this track buffer has a random access point timestamp that is greater than or equal to end, then update
+ // remove end timestamp to that random access point timestamp.
+
+ // NOTE: To handle MediaSamples which may be an amalgamation of multiple shorter samples, find samples whose presentation
+ // interval straddles the start and end times, and divide them if possible:
+ auto divideSampleIfPossibleAtPresentationTime = [&] (const MediaTime& time) {
+ auto sampleIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
+ if (sampleIterator == trackBuffer.samples.presentationOrder().end())
+ return;
+ RefPtr<MediaSample> sample = sampleIterator->second;
+ if (!sample->isDivisable())
+ return;
+ std::pair<RefPtr<MediaSample>, RefPtr<MediaSample>> replacementSamples = sample->divide(time);
+ if (!replacementSamples.first || !replacementSamples.second)
+ return;
+ LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - splitting sample (%s) into\n\t(%s)\n\t(%s)", this,
+ toString(sample).utf8().data(),
+ toString(replacementSamples.first).utf8().data(),
+ toString(replacementSamples.second).utf8().data());
+ trackBuffer.samples.removeSample(sample.get());
+ trackBuffer.samples.addSample(*replacementSamples.first);
+ trackBuffer.samples.addSample(*replacementSamples.second);
+ };
+ divideSampleIfPossibleAtPresentationTime(start);
+ divideSampleIfPossibleAtPresentationTime(end);
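+
+ // NOTE (illustrative, hypothetical numbers): an amalgamated sample spanning [10, 12) with a
+ // removal start of 11 is divided into [10, 11) and [11, 12), so only the second half falls
+ // inside the removal range and the first half survives the erase below.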
+
+ // NOTE: findSyncSampleAfterPresentationTime will return the next sync sample on or after the presentation time
+ // or decodeOrder().end() if no sync sample exists after that presentation time.
+ DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
+ PresentationOrderSampleMap::iterator removePresentationEnd;
+ if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
+ removePresentationEnd = trackBuffer.samples.presentationOrder().end();
+ else
+ removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());
+
+ PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(start);
+ if (removePresentationStart == removePresentationEnd)
+ continue;
+
+ // 3.3 Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
+ // start and less than the remove end timestamp.
+ // NOTE: frames must be removed in decode order, so that all dependent frames between the frame to be removed
+ // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
+ // presentation order.
+ PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
+ DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
+ DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
+
+ DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
+ PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");
+
+ // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
+ // not yet displayed samples.
+ if (trackBuffer.lastEnqueuedPresentationTime.isValid() && currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
+ PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
+ possiblyEnqueuedRanges.intersectWith(erasedRanges);
+ if (possiblyEnqueuedRanges.length())
+ trackBuffer.needsReenqueueing = true;
+ }
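+
+ // NOTE (illustrative, hypothetical numbers): with currentTime at 10s and samples up to 15s
+ // already handed to the decoder, erasing anything inside (10, 15) invalidates enqueued but
+ // not yet displayed frames, so the track is flagged for re-enqueueing.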
+
+ erasedRanges.invert();
+ trackBuffer.buffered.intersectWith(erasedRanges);
+ setBufferedDirty(true);
+
+ // 3.4 If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
+ // and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
+ // the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
+ if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
+ m_private->setReadyState(MediaPlayer::HaveMetadata);
+ }
+
+ updateBufferedFromTrackBuffers();
+
+ // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
+ // No-op
- return m_videoTracks.get();
+ LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}
-AudioTrackList* SourceBuffer::audioTracks()
+void SourceBuffer::removeTimerFired()
{
- if (!m_source->mediaElement())
- return nullptr;
+ if (isRemoved())
+ return;
- if (!m_audioTracks)
- m_audioTracks = AudioTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+ ASSERT(m_updating);
+ ASSERT(m_pendingRemoveStart.isValid());
+ ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);
+
+ // Section 3.5.7 Range Removal
+ // http://w3c.github.io/media-source/#sourcebuffer-range-removal
+
+ // 6. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
+ removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);
- return m_audioTracks.get();
+ // 7. Set the updating attribute to false.
+ m_updating = false;
+ m_pendingRemoveStart = MediaTime::invalidTime();
+ m_pendingRemoveEnd = MediaTime::invalidTime();
+
+ // 8. Queue a task to fire a simple event named update at this SourceBuffer object.
+ scheduleEvent(eventNames().updateEvent);
+
+ // 9. Queue a task to fire a simple event named updateend at this SourceBuffer object.
+ scheduleEvent(eventNames().updateendEvent);
}
-TextTrackList* SourceBuffer::textTracks()
+void SourceBuffer::evictCodedFrames(size_t newDataSize)
{
- if (!m_source->mediaElement())
- return nullptr;
+ // 3.5.13 Coded Frame Eviction Algorithm
+ // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction
- if (!m_textTracks)
- m_textTracks = TextTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
+ if (isRemoved())
+ return;
+
+ // This algorithm is run to free up space in this source buffer when new data is appended.
+ // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
+ // 2. If the buffer full flag equals false, then abort these steps.
+ if (!m_bufferFull)
+ return;
+
+ size_t maximumBufferSize = this->maximumBufferSize();
+
+ // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
+ // the presentation to make room for the new data.
+
+ // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
+ // a time, up to 30 seconds before currentTime.
+ MediaTime thirtySeconds = MediaTime(30, 1);
+ MediaTime currentTime = m_source->currentTime();
+ MediaTime maximumRangeEnd = currentTime - thirtySeconds;
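+
+ // NOTE (illustrative, hypothetical numbers): with currentTime at 100s, maximumRangeEnd is
+ // 70s, so the loop below evicts [0, 30), then [30, 60), then [60, 70), stopping early as
+ // soon as enough space has been freed.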
+
+#if !LOG_DISABLED
+ LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - currentTime = %lf, require %zu bytes, maximum buffer size is %zu", this, m_source->currentTime().toDouble(), extraMemoryCost() + newDataSize, maximumBufferSize);
+ size_t initialBufferedSize = extraMemoryCost();
+#endif
+
+ MediaTime rangeStart = MediaTime::zeroTime();
+ MediaTime rangeEnd = rangeStart + thirtySeconds;
+ while (rangeStart < maximumRangeEnd) {
+ // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
+ // end equal to the removal range start and end timestamp respectively.
+ removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
+ if (extraMemoryCost() + newDataSize < maximumBufferSize) {
+ m_bufferFull = false;
+ break;
+ }
+
+ rangeStart += thirtySeconds;
+ rangeEnd += thirtySeconds;
+ }
+
+ if (!m_bufferFull) {
+ LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes", this, initialBufferedSize - extraMemoryCost());
+ return;
+ }
+
+ // If there still isn't enough free space and there are buffers in time ranges after the current range (i.e. there is a gap after
+ // the current buffered range), delete 30 seconds at a time from duration back to the current time range or 30 seconds after
+ // currentTime, whichever we hit first.
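+ // NOTE (illustrative, hypothetical numbers): with a duration of 300s and currentTime at 60s,
+ // minimumRangeStart is 90s and the loop below walks backwards over [270, 300), [240, 270),
+ // and so on, never evicting anything earlier than 90s or inside the range containing
+ // currentTime.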
+ auto buffered = m_buffered->ranges();
+ size_t currentTimeRange = buffered.find(currentTime);
+ if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
+ LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes but FAILED to free enough", this, initialBufferedSize - extraMemoryCost());
+ return;
+ }
+
+ MediaTime minimumRangeStart = currentTime + thirtySeconds;
+
+ rangeEnd = m_source->duration();
+ rangeStart = rangeEnd - thirtySeconds;
+ while (rangeStart > minimumRangeStart) {
+
+ // Do not evict data from the time range that contains currentTime.
+ size_t startTimeRange = buffered.find(rangeStart);
+ if (startTimeRange == currentTimeRange) {
+ size_t endTimeRange = buffered.find(rangeEnd);
+ if (endTimeRange == currentTimeRange)
+ break;
- return m_textTracks.get();
+ rangeEnd = buffered.start(endTimeRange);
+ }
+
+ // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
+ // end equal to the removal range start and end timestamp respectively.
+ removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
+ if (extraMemoryCost() + newDataSize < maximumBufferSize) {
+ m_bufferFull = false;
+ break;
+ }
+
+ rangeStart -= thirtySeconds;
+ rangeEnd -= thirtySeconds;
+ }
+
+ LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough");
+}
+
+size_t SourceBuffer::maximumBufferSize() const
+{
+ if (isRemoved())
+ return 0;
+
+ auto* element = m_source->mediaElement();
+ if (!element)
+ return 0;
+
+ return element->maximumSourceBufferSize(*this);
+}
+
+VideoTrackList& SourceBuffer::videoTracks()
+{
+ if (!m_videoTracks)
+ m_videoTracks = VideoTrackList::create(m_source->mediaElement(), scriptExecutionContext());
+ return *m_videoTracks;
+}
+
+AudioTrackList& SourceBuffer::audioTracks()
+{
+ if (!m_audioTracks)
+ m_audioTracks = AudioTrackList::create(m_source->mediaElement(), scriptExecutionContext());
+ return *m_audioTracks;
+}
+
+TextTrackList& SourceBuffer::textTracks()
+{
+ if (!m_textTracks)
+ m_textTracks = TextTrackList::create(m_source->mediaElement(), scriptExecutionContext());
+ return *m_textTracks;
}
void SourceBuffer::setActive(bool active)
@@ -510,42 +977,86 @@ void SourceBuffer::setActive(bool active)
m_active = active;
m_private->setActive(active);
- m_source->sourceBufferDidChangeAcitveState(this, active);
+ if (!isRemoved())
+ m_source->sourceBufferDidChangeActiveState(*this, active);
}
-void SourceBuffer::sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString& error)
+void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(const InitializationSegment& segment)
{
- m_source->endOfStream(error, IgnorableExceptionCode());
-}
+ if (isRemoved())
+ return;
+
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(%p)", this);
+
+ // 3.5.8 Initialization Segment Received (ctd)
+ // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015]
-void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment)
-{
- // 3.5.7 Initialization Segment Received
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received
// 1. Update the duration attribute if it currently equals NaN:
- if (std::isnan(m_source->duration())) {
+ if (m_source->duration().isInvalid()) {
// ↳ If the initialization segment contains a duration:
// Run the duration change algorithm with new duration set to the duration in the initialization segment.
// ↳ Otherwise:
// Run the duration change algorithm with new duration set to positive Infinity.
MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime();
- m_source->setDuration(newDuration.toDouble(), IGNORE_EXCEPTION);
+ m_source->setDurationInternal(newDuration);
}
- // 2. If the initialization segment has no audio, video, or text tracks, then run the end of stream
- // algorithm with the error parameter set to "decode" and abort these steps.
- if (!segment.audioTracks.size() && !segment.videoTracks.size() && !segment.textTracks.size())
- m_source->endOfStream(decodeError(), IgnorableExceptionCode());
-
+ // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm
+ // with the decode error parameter set to true and abort these steps.
+ if (segment.audioTracks.isEmpty() && segment.videoTracks.isEmpty() && segment.textTracks.isEmpty()) {
+ appendError(true);
+ return;
+ }
// 3. If the first initialization segment flag is true, then run the following steps:
if (m_receivedFirstInitializationSegment) {
+
+ // 3.1. Verify the following properties. If any of the checks fail then run the append error algorithm
+ // with the decode error parameter set to true and abort these steps.
if (!validateInitializationSegment(segment)) {
- m_source->endOfStream(decodeError(), IgnorableExceptionCode());
+ appendError(true);
return;
}
// 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers.
- // NOTE: No changes to make
+ ASSERT(segment.audioTracks.size() == audioTracks().length());
+ for (auto& audioTrackInfo : segment.audioTracks) {
+ if (audioTracks().length() == 1) {
+ audioTracks().item(0)->setPrivate(*audioTrackInfo.track);
+ break;
+ }
+
+ auto audioTrack = audioTracks().getTrackById(audioTrackInfo.track->id());
+ ASSERT(audioTrack);
+ audioTrack->setPrivate(*audioTrackInfo.track);
+ }
+
+ ASSERT(segment.videoTracks.size() == videoTracks().length());
+ for (auto& videoTrackInfo : segment.videoTracks) {
+ if (videoTracks().length() == 1) {
+ videoTracks().item(0)->setPrivate(*videoTrackInfo.track);
+ break;
+ }
+
+ auto videoTrack = videoTracks().getTrackById(videoTrackInfo.track->id());
+ ASSERT(videoTrack);
+ videoTrack->setPrivate(*videoTrackInfo.track);
+ }
+
+ ASSERT(segment.textTracks.size() == textTracks().length());
+ for (auto& textTrackInfo : segment.textTracks) {
+ if (textTracks().length() == 1) {
+ downcast<InbandTextTrack>(*textTracks().item(0)).setPrivate(*textTrackInfo.track);
+ break;
+ }
+
+ auto textTrack = textTracks().getTrackById(textTrackInfo.track->id());
+ ASSERT(textTrack);
+ downcast<InbandTextTrack>(*textTrack).setPrivate(*textTrackInfo.track);
+ }
+
+ // 3.3 Set the need random access point flag on all track buffers to true.
+ for (auto& trackBuffer : m_trackBufferMap.values())
+ trackBuffer.needRandomAccessFlag = true;
}
// 4. Let active track flag equal false.
@@ -554,20 +1065,19 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff
// 5. If the first initialization segment flag is false, then run the following steps:
if (!m_receivedFirstInitializationSegment) {
// 5.1 If the initialization segment contains tracks with codecs the user agent does not support,
- // then run the end of stream algorithm with the error parameter set to "decode" and abort these steps.
+ // then run the append error algorithm with the decode error parameter set to true and abort these steps.
// NOTE: This check is the responsibility of the SourceBufferPrivate.
// 5.2 For each audio track in the initialization segment, run following steps:
- for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) {
- AudioTrackPrivate* audioTrackPrivate = it->track.get();
-
+ for (auto& audioTrackInfo : segment.audioTracks) {
+ // FIXME: Implement steps 5.2.1-5.2.8.1 as per Editor's Draft 09 January 2015, and reorder this
// 5.2.1 Let new audio track be a new AudioTrack object.
// 5.2.2 Generate a unique ID and assign it to the id property on new audio track.
- RefPtr<AudioTrack> newAudioTrack = AudioTrack::create(this, audioTrackPrivate);
+ auto newAudioTrack = AudioTrack::create(*this, *audioTrackInfo.track);
newAudioTrack->setSourceBuffer(this);
// 5.2.3 If audioTracks.length equals 0, then run the following steps:
- if (!audioTracks()->length()) {
+ if (!audioTracks().length()) {
// 5.2.3.1 Set the enabled property on new audio track to true.
newAudioTrack->setEnabled(true);
@@ -579,33 +1089,34 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff
// 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
// referenced by the audioTracks attribute on this SourceBuffer object.
- audioTracks()->append(newAudioTrack);
+ audioTracks().append(newAudioTrack.copyRef());
// 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
// 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
// referenced by the audioTracks attribute on the HTMLMediaElement.
- m_source->mediaElement()->audioTracks()->append(newAudioTrack);
+ m_source->mediaElement()->audioTracks().append(newAudioTrack.copyRef());
// 5.2.8 Create a new track buffer to store coded frames for this track.
ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
- TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;
+ auto& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;
// 5.2.9 Add the track description for this track to the track buffer.
- trackBuffer.description = it->description;
+ trackBuffer.description = audioTrackInfo.description;
+
+ m_audioCodecs.append(trackBuffer.description->codec());
}
// 5.3 For each video track in the initialization segment, run following steps:
- for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) {
- VideoTrackPrivate* videoTrackPrivate = it->track.get();
-
+ for (auto& videoTrackInfo : segment.videoTracks) {
+ // FIXME: Implement steps 5.3.1-5.3.8.1 as per Editor's Draft 09 January 2015, and reorder this
// 5.3.1 Let new video track be a new VideoTrack object.
// 5.3.2 Generate a unique ID and assign it to the id property on new video track.
- RefPtr<VideoTrack> newVideoTrack = VideoTrack::create(this, videoTrackPrivate);
+ auto newVideoTrack = VideoTrack::create(*this, *videoTrackInfo.track);
newVideoTrack->setSourceBuffer(this);
// 5.3.3 If videoTracks.length equals 0, then run the following steps:
- if (!videoTracks()->length()) {
+ if (!videoTracks().length()) {
// 5.3.3.1 Set the selected property on new video track to true.
newVideoTrack->setSelected(true);
@@ -617,58 +1128,64 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff
// 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
// referenced by the videoTracks attribute on this SourceBuffer object.
- videoTracks()->append(newVideoTrack);
+ videoTracks().append(newVideoTrack.copyRef());
// 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement.
// 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
// referenced by the videoTracks attribute on the HTMLMediaElement.
- m_source->mediaElement()->videoTracks()->append(newVideoTrack);
+ m_source->mediaElement()->videoTracks().append(newVideoTrack.copyRef());
// 5.3.8 Create a new track buffer to store coded frames for this track.
ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
- TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;
+ auto& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;
// 5.3.9 Add the track description for this track to the track buffer.
- trackBuffer.description = it->description;
+ trackBuffer.description = videoTrackInfo.description;
+
+ m_videoCodecs.append(trackBuffer.description->codec());
}
// 5.4 For each text track in the initialization segment, run following steps:
- for (auto it = segment.textTracks.begin(); it != segment.textTracks.end(); ++it) {
- InbandTextTrackPrivate* textTrackPrivate = it->track.get();
+ for (auto& textTrackInfo : segment.textTracks) {
+ auto& textTrackPrivate = *textTrackInfo.track;
+ // FIXME: Implement steps 5.4.1-5.4.8.1 as per Editor's Draft 09 January 2015, and reorder this
// 5.4.1 Let new text track be a new TextTrack object with its properties populated with the
// appropriate information from the initialization segment.
- RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate);
+ auto newTextTrack = InbandTextTrack::create(*scriptExecutionContext(), *this, textTrackPrivate);
// 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active
// track flag to true.
- if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled)
+ if (textTrackPrivate.mode() != InbandTextTrackPrivate::Disabled)
activeTrackFlag = true;
// 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object.
// 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this
// SourceBuffer object.
- textTracks()->append(newTextTrack);
+ textTracks().append(newTextTrack.get());
// 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement.
// 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is
// not cancelable, and that uses the TrackEvent interface, at the TextTrackList object
// referenced by the textTracks attribute on the HTMLMediaElement.
- m_source->mediaElement()->textTracks()->append(newTextTrack);
+ m_source->mediaElement()->textTracks().append(WTFMove(newTextTrack));
// 5.4.7 Create a new track buffer to store coded frames for this track.
- ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id()));
- TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value;
+ ASSERT(!m_trackBufferMap.contains(textTrackPrivate.id()));
+ auto& trackBuffer = m_trackBufferMap.add(textTrackPrivate.id(), TrackBuffer()).iterator->value;
// 5.4.8 Add the track description for this track to the track buffer.
- trackBuffer.description = it->description;
+ trackBuffer.description = textTrackInfo.description;
+
+ m_textCodecs.append(trackBuffer.description->codec());
}
// 5.5 If active track flag equals true, then run the following steps:
if (activeTrackFlag) {
// 5.5.1 Add this SourceBuffer to activeSourceBuffers.
+ // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
setActive(true);
}
@@ -679,8 +1196,8 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff
// 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps:
if (m_private->readyState() == MediaPlayer::HaveNothing) {
// 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps.
- for (unsigned long i = 0; i < m_source->sourceBuffers()->length(); ++i) {
- if (!m_source->sourceBuffers()->item(i)->m_receivedFirstInitializationSegment)
+ for (auto& sourceBuffer : *m_source->sourceBuffers()) {
+ if (!sourceBuffer->m_receivedFirstInitializationSegment)
return;
}
@@ -698,52 +1215,52 @@ void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBuff
bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
{
- // 3.5.7 Initialization Segment Received (ctd)
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received
+ // FIXME: ordering of all 3.5.X (X>=7) functions needs to be updated to post-[24 July 2014 Editor's Draft] version
+ // 3.5.8 Initialization Segment Received (ctd)
+ // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015]
- // 3.1. Verify the following properties. If any of the checks fail then run the end of stream
- // algorithm with the error parameter set to "decode" and abort these steps.
+ // Note: those are checks from step 3.1
// * The number of audio, video, and text tracks match what was in the first initialization segment.
- if (segment.audioTracks.size() != audioTracks()->length()
- || segment.videoTracks.size() != videoTracks()->length()
- || segment.textTracks.size() != textTracks()->length())
+ if (segment.audioTracks.size() != audioTracks().length()
+ || segment.videoTracks.size() != videoTracks().length()
+ || segment.textTracks.size() != textTracks().length())
return false;
// * The codecs for each track, match what was specified in the first initialization segment.
- for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) {
- if (!m_videoCodecs.contains(it->description->codec()))
+ for (auto& audioTrackInfo : segment.audioTracks) {
+ if (!m_audioCodecs.contains(audioTrackInfo.description->codec()))
return false;
}
- for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) {
- if (!m_audioCodecs.contains(it->description->codec()))
+ for (auto& videoTrackInfo : segment.videoTracks) {
+ if (!m_videoCodecs.contains(videoTrackInfo.description->codec()))
return false;
}
- for (auto it = segment.textTracks.begin(); it != segment.textTracks.end(); ++it) {
- if (!m_textCodecs.contains(it->description->codec()))
+ for (auto& textTrackInfo : segment.textTracks) {
+ if (!m_textCodecs.contains(textTrackInfo.description->codec()))
return false;
}
// * If more than one track for a single type are present (ie 2 audio tracks), then the Track
// IDs match the ones in the first initialization segment.
if (segment.audioTracks.size() >= 2) {
- for (auto it = segment.audioTracks.begin(); it != segment.audioTracks.end(); ++it) {
- if (!m_trackBufferMap.contains(it->track->id()))
+ for (auto& audioTrackInfo : segment.audioTracks) {
+ if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
return false;
}
}
if (segment.videoTracks.size() >= 2) {
- for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) {
- if (!m_trackBufferMap.contains(it->track->id()))
+ for (auto& videoTrackInfo : segment.videoTracks) {
+ if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
return false;
}
}
if (segment.textTracks.size() >= 2) {
- for (auto it = segment.videoTracks.begin(); it != segment.videoTracks.end(); ++it) {
- if (!m_trackBufferMap.contains(it->track->id()))
+ for (auto& textTrackInfo : segment.videoTracks) {
+ if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
return false;
}
}
@@ -769,102 +1286,188 @@ public:
}
};
-void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample> prpSample)
+void SourceBuffer::appendError(bool decodeErrorParam)
{
- RefPtr<MediaSample> sample = prpSample;
+ // 3.5.3 Append Error Algorithm
+ // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-append-error [Editor's Draft 09 January 2015]
+
+ ASSERT(m_updating);
+ // 1. Run the reset parser state algorithm.
+ resetParserState();
+
+ // 2. Set the updating attribute to false.
+ m_updating = false;
+
+ // 3. Queue a task to fire a simple event named error at this SourceBuffer object.
+ scheduleEvent(eventNames().errorEvent);
+
+ // 4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
+ scheduleEvent(eventNames().updateendEvent);
+
+ // 5. If decode error is true, then run the end of stream algorithm with the error parameter set to "decode".
+ if (decodeErrorParam)
+ m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
+}
+
+void SourceBuffer::sourceBufferPrivateDidReceiveSample(MediaSample& sample)
+{
+ if (isRemoved())
+ return;
+
+ // 3.5.1 Segment Parser Loop
+ // 6.1 If the first initialization segment received flag is false, then run the append error algorithm
+ // with the decode error parameter set to true and abort this algorithm.
+ // Note: the current design makes SourceBuffer largely ignorant of append state, which is
+ // tracked by SourceBufferPrivate. That is why this check cannot be done in appendBufferInternal
+ // without imposing some kind of state-machine design.
+ if (!m_receivedFirstInitializationSegment) {
+ appendError(true);
+ return;
+ }
// 3.5.8 Coded Frame Processing
+ // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-processing
+
// When complete coded frames have been parsed by the segment parser loop then the following steps
// are run:
// 1. For each coded frame in the media segment run the following steps:
// 1.1. Loop Top
do {
- // 1.1 (ctd) Let presentation timestamp be a double precision floating point representation of
- // the coded frame's presentation timestamp in seconds.
- MediaTime presentationTimestamp = sample->presentationTime();
-
- // 1.2 Let decode timestamp be a double precision floating point representation of the coded frame's
- // decode timestamp in seconds.
- MediaTime decodeTimestamp = sample->decodeTime();
+ MediaTime presentationTimestamp;
+ MediaTime decodeTimestamp;
+
+ if (m_shouldGenerateTimestamps) {
+ // ↳ If generate timestamps flag equals true:
+ // 1. Let presentation timestamp equal 0.
+ presentationTimestamp = MediaTime::zeroTime();
+
+ // 2. Let decode timestamp equal 0.
+ decodeTimestamp = MediaTime::zeroTime();
+ } else {
+ // ↳ Otherwise:
+ // 1. Let presentation timestamp be a double precision floating point representation of
+ // the coded frame's presentation timestamp in seconds.
+ presentationTimestamp = sample.presentationTime();
+
+ // 2. Let decode timestamp be a double precision floating point representation of the coded frame's
+ // decode timestamp in seconds.
+ decodeTimestamp = sample.decodeTime();
+ }
- // 1.3 Let frame duration be a double precision floating point representation of the coded frame's
+ // 1.2 Let frame duration be a double precision floating point representation of the coded frame's
// duration in seconds.
- MediaTime frameDuration = sample->duration();
+ MediaTime frameDuration = sample.duration();
+
+ // 1.3 If mode equals "sequence" and group start timestamp is set, then run the following steps:
+ if (m_mode == AppendMode::Sequence && m_groupStartTimestamp.isValid()) {
+ // 1.3.1 Set timestampOffset equal to group start timestamp - presentation timestamp.
+ m_timestampOffset = m_groupStartTimestamp;
- // 1.4 If mode equals "sequence" and group start timestamp is set, then run the following steps:
- // FIXME: add support for "sequence" mode
+ // 1.3.2 Set group end timestamp equal to group start timestamp.
+ m_groupEndTimestamp = m_groupStartTimestamp;
- // 1.5 If timestampOffset is not 0, then run the following steps:
- if (m_timestampOffset != MediaTime::zeroTime()) {
- // 1.5.1 Add timestampOffset to the presentation timestamp.
+ // 1.3.3 Set the need random access point flag on all track buffers to true.
+ for (auto& trackBuffer : m_trackBufferMap.values())
+ trackBuffer.needRandomAccessFlag = true;
+
+ // 1.3.4 Unset group start timestamp.
+ m_groupStartTimestamp = MediaTime::invalidTime();
+ }
+
+ // 1.4 If timestampOffset is not 0, then run the following steps:
+ if (m_timestampOffset) {
+ // 1.4.1 Add timestampOffset to the presentation timestamp.
presentationTimestamp += m_timestampOffset;
- // 1.5.2 Add timestampOffset to the decode timestamp.
+ // 1.4.2 Add timestampOffset to the decode timestamp.
decodeTimestamp += m_timestampOffset;
-
- // 1.5.3 If the presentation timestamp or decode timestamp is less than the presentation start
- // time, then run the end of stream algorithm with the error parameter set to "decode", and
- // abort these steps.
- MediaTime presentationStartTime = MediaTime::zeroTime();
- if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime) {
- m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
- return;
- }
}
- // 1.6 Let track buffer equal the track buffer that the coded frame will be added to.
- AtomicString trackID = sample->trackID();
+ // 1.5 Let track buffer equal the track buffer that the coded frame will be added to.
+ AtomicString trackID = sample.trackID();
auto it = m_trackBufferMap.find(trackID);
- if (it == m_trackBufferMap.end())
- it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator;
+ if (it == m_trackBufferMap.end()) {
+ // The client managed to append a sample with a trackID not present in the initialization
+ // segment. This would be a good place to post a message to the developer console.
+ didDropSample();
+ return;
+ }
TrackBuffer& trackBuffer = it->value;
- // 1.7 If last decode timestamp for track buffer is set and decode timestamp is less than last
+ // 1.6 ↳ If last decode timestamp for track buffer is set and decode timestamp is less than last
// decode timestamp:
// OR
- // If last decode timestamp for track buffer is set and the difference between decode timestamp and
+ // ↳ If last decode timestamp for track buffer is set and the difference between decode timestamp and
// last decode timestamp is greater than 2 times last frame duration:
if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
|| abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) {
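+ // NOTE (illustrative, hypothetical numbers): with a last decode timestamp of 10s and a last
+ // frame duration of 1/30s, a new decode timestamp of 9s (a backwards jump) or of 10.5s (a
+ // gap wider than two frame durations) is treated as a discontinuity and resets the per-track
+ // state below.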
- // 1.7.1 If mode equals "segments":
- // Set highest presentation end timestamp to presentation timestamp.
- m_highestPresentationEndTimestamp = presentationTimestamp;
-
- // If mode equals "sequence":
- // Set group start timestamp equal to the highest presentation end timestamp.
- // FIXME: Add support for "sequence" mode.
-
- for (auto i = m_trackBufferMap.values().begin(); i != m_trackBufferMap.values().end(); ++i) {
- // 1.7.2 Unset the last decode timestamp on all track buffers.
- i->lastDecodeTimestamp = MediaTime::invalidTime();
- // 1.7.3 Unset the last frame duration on all track buffers.
- i->lastFrameDuration = MediaTime::invalidTime();
- // 1.7.4 Unset the highest presentation timestamp on all track buffers.
- i->highestPresentationTimestamp = MediaTime::invalidTime();
- // 1.7.5 Set the need random access point flag on all track buffers to true.
- i->needRandomAccessFlag = true;
+
+ // 1.6.1:
+ if (m_mode == AppendMode::Segments) {
+ // ↳ If mode equals "segments":
+ // Set group end timestamp to presentation timestamp.
+ m_groupEndTimestamp = presentationTimestamp;
+ } else {
+ // ↳ If mode equals "sequence":
+ // Set group start timestamp equal to the group end timestamp.
+ m_groupStartTimestamp = m_groupEndTimestamp;
}
- // 1.7.6 Jump to the Loop Top step above to restart processing of the current coded frame.
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
+ // 1.6.2 Unset the last decode timestamp on all track buffers.
+ trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
+ // 1.6.3 Unset the last frame duration on all track buffers.
+ trackBuffer.lastFrameDuration = MediaTime::invalidTime();
+ // 1.6.4 Unset the highest presentation timestamp on all track buffers.
+ trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
+ // 1.6.5 Set the need random access point flag on all track buffers to true.
+ trackBuffer.needRandomAccessFlag = true;
+ }
+
+ // 1.6.6 Jump to the Loop Top step above to restart processing of the current coded frame.
continue;
}
- // 1.8 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
+ if (m_mode == AppendMode::Sequence) {
+ // Use the generated timestamps instead of the sample's timestamps.
+ sample.setTimestamps(presentationTimestamp, decodeTimestamp);
+ } else if (m_timestampOffset) {
+ // Reflect the timestamp offset into the sample.
+ sample.offsetTimestampsBy(m_timestampOffset);
+ }
+
+ // 1.7 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;
- // 1.9 If presentation timestamp is less than appendWindowStart, then set the need random access
+ // 1.8 If presentation timestamp is less than appendWindowStart, then set the need random access
// point flag to true, drop the coded frame, and jump to the top of the loop to start processing
// the next coded frame.
- // 1.10 If frame end timestamp is greater than appendWindowEnd, then set the need random access
+ // 1.9 If frame end timestamp is greater than appendWindowEnd, then set the need random access
// point flag to true, drop the coded frame, and jump to the top of the loop to start processing
// the next coded frame.
- // FIXME: implement append windows
+ if (presentationTimestamp < m_appendWindowStart || frameEndTimestamp > m_appendWindowEnd) {
+ trackBuffer.needRandomAccessFlag = true;
+ didDropSample();
+ return;
+ }
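+
+ // NOTE (illustrative, hypothetical numbers): with appendWindowStart at 5s, a frame spanning
+ // [4.9, 5.0) is dropped here, and the need random access point flag ensures decoding resumes
+ // only at the next sync frame that lands inside the window.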
+
+ // 1.10 If the decode timestamp is less than the presentation start time, then run the end of stream
+ // algorithm with the error parameter set to "decode", and abort these steps.
+ // NOTE: Until <https://www.w3.org/Bugs/Public/show_bug.cgi?id=27487> is resolved, we will only check
+ // the presentation timestamp.
+ MediaTime presentationStartTime = MediaTime::zeroTime();
+ if (presentationTimestamp < presentationStartTime) {
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveSample(%p) - failing because presentationTimestamp < presentationStartTime", this);
+ m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
+ return;
+ }
// 1.11 If the need random access point flag on track buffer equals true, then run the following steps:
if (trackBuffer.needRandomAccessFlag) {
// 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
// to the top of the loop to start processing the next coded frame.
- if (!sample->isSync()) {
+ if (!sample.isSync()) {
didDropSample();
return;
}
@@ -877,16 +1480,15 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
// 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information
// FIXME: Add support for sample splicing.
- SampleMap::MapType erasedSamples;
+ SampleMap erasedSamples;
MediaTime microsecond(1, 1000000);
- // 1.14 If last decode timestamp for track buffer is unset and there is a coded frame in
- // track buffer with a presentation timestamp less than or equal to presentation timestamp
- // and presentation timestamp is less than this coded frame's presentation timestamp plus
- // its frame duration, then run the following steps:
+ // 1.14 If last decode timestamp for track buffer is unset and presentation timestamp
+ // falls within the presentation interval of a coded frame in track buffer, then run the
+ // following steps:
if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
- auto iter = trackBuffer.samples.findSampleContainingPresentationTime(presentationTimestamp);
- if (iter != trackBuffer.samples.presentationEnd()) {
+ auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
+ if (iter != trackBuffer.samples.presentationOrder().end()) {
// 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
RefPtr<MediaSample> overlappedFrame = iter->second;
@@ -908,7 +1510,7 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
// 1.14.2.3 If the presentation timestamp is less than the remove window timestamp,
// then remove overlapped frame and any coded frames that depend on it from track buffer.
if (presentationTimestamp < removeWindowTimestamp)
- erasedSamples.insert(*iter);
+ erasedSamples.addSample(*iter->second);
}
// If track buffer contains timed text coded frames:
@@ -922,52 +1524,64 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
// Remove all coded frames from track buffer that have a presentation timestamp greater than or
// equal to presentation timestamp and less than frame end timestamp.
- auto iter_pair = trackBuffer.samples.findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
- if (iter_pair.first != trackBuffer.samples.presentationEnd())
- erasedSamples.insert(iter_pair.first, iter_pair.second);
+ auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
+ if (iter_pair.first != trackBuffer.samples.presentationOrder().end())
+ erasedSamples.addRange(iter_pair.first, iter_pair.second);
}
- // If highest presentation timestamp for track buffer is set and less than presentation timestamp
- if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp < presentationTimestamp) {
+ // If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp
+ if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) {
// Remove all coded frames from track buffer that have a presentation timestamp greater than highest
// presentation timestamp and less than or equal to frame end timestamp.
- auto iter_pair = trackBuffer.samples.findSamplesBetweenPresentationTimes(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
- if (iter_pair.first != trackBuffer.samples.presentationEnd())
- erasedSamples.insert(iter_pair.first, iter_pair.second);
+ do {
+ // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is
+ // near the end of the buffered range. Use a linear-backwards search if the search range is within one
+ // frame duration of the end:
+ unsigned bufferedLength = trackBuffer.buffered.length();
+ if (!bufferedLength)
+ break;
+
+ MediaTime highestBufferedTime = trackBuffer.buffered.maximumBufferedTime();
+
+ PresentationOrderSampleMap::iterator_range range;
+ if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
+ range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
+ else
+ range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
+
+ if (range.first != trackBuffer.samples.presentationOrder().end())
+ erasedSamples.addRange(range.first, range.second);
+ } while (false);
}
// 1.16 Remove decoding dependencies of the coded frames removed in the previous step:
- SampleMap::MapType dependentSamples;
+ DecodeOrderSampleMap::MapType dependentSamples;
if (!erasedSamples.empty()) {
// If detailed information about decoding dependencies is available:
// FIXME: Add support for detailed dependency information
// Otherwise: Remove all coded frames between the coded frames removed in the previous step
// and the next random access point after those removed frames.
- for (auto erasedIt = erasedSamples.begin(), end = erasedSamples.end(); erasedIt != end; ++erasedIt) {
- auto currentDecodeIter = trackBuffer.samples.findSampleWithDecodeTime(erasedIt->second->decodeTime());
- auto nextSyncIter = trackBuffer.samples.findSyncSampleAfterDecodeIterator(currentDecodeIter);
- dependentSamples.insert(currentDecodeIter, nextSyncIter);
- }
-
-
- RefPtr<TimeRanges> erasedRanges = TimeRanges::create();
- for (auto erasedIt = erasedSamples.begin(), end = erasedSamples.end(); erasedIt != end; ++erasedIt) {
- double startTime = erasedIt->first.toDouble();
- double endTime = ((erasedIt->first + erasedIt->second->duration()) + microsecond).toDouble();
- erasedRanges->add(startTime, endTime);
- trackBuffer.samples.removeSample(erasedIt->second.get());
- }
-
- for (auto dependentIt = dependentSamples.begin(), end = dependentSamples.end(); dependentIt != end; ++dependentIt) {
- double startTime = dependentIt->first.toDouble();
- double endTime = ((dependentIt->first + dependentIt->second->duration()) + microsecond).toDouble();
- erasedRanges->add(startTime, endTime);
- trackBuffer.samples.removeSample(dependentIt->second.get());
+ auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
+ auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
+ auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
+ dependentSamples.insert(firstDecodeIter, nextSyncIter);
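+
+ // NOTE (illustrative): if a keyframe at decode time 10s is erased and the next sync sample
+ // sits at 12s, every frame in decode order between them is removed as well, since without
+ // the keyframe those dependent frames could no longer be decoded.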
+
+ PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample");
+
+ // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
+ // not yet displayed samples.
+ MediaTime currentMediaTime = m_source->currentTime();
+ if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
+ PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
+ possiblyEnqueuedRanges.intersectWith(erasedRanges);
+ if (possiblyEnqueuedRanges.length())
+ trackBuffer.needsReenqueueing = true;
}
- erasedRanges->invert();
- m_buffered->intersectWith(erasedRanges.get());
+ erasedRanges.invert();
+ trackBuffer.buffered.intersectWith(erasedRanges);
+ setBufferedDirty(true);
}
// 1.17 If spliced audio frame is set:
@@ -979,7 +1593,11 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
// Otherwise:
// Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
trackBuffer.samples.addSample(sample);
- trackBuffer.decodeQueue.insert(SampleMap::MapType::value_type(decodeTimestamp, sample));
+
+ if (trackBuffer.lastEnqueuedDecodeEndTime.isInvalid() || decodeTimestamp >= trackBuffer.lastEnqueuedDecodeEndTime) {
+ DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp);
+ trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample));
+ }
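+
+ // NOTE: samples whose decode timestamp precedes the last frame already enqueued are
+ // deliberately kept out of decodeQueue, as they would reach the decoder out of decode
+ // order; they stay in the sample map and can be picked up by a later re-enqueue.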
// 1.18 Set last decode timestamp for track buffer to decode timestamp.
trackBuffer.lastDecodeTimestamp = decodeTimestamp;
@@ -993,12 +1611,29 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
- // 1.21 If highest presentation end timestamp is unset or frame end timestamp is greater than highest
- // presentation end timestamp, then set highest presentation end timestamp equal to frame end timestamp.
- if (m_highestPresentationEndTimestamp.isInvalid() || frameEndTimestamp > m_highestPresentationEndTimestamp)
- m_highestPresentationEndTimestamp = frameEndTimestamp;
+ // 1.21 If frame end timestamp is greater than group end timestamp, then set group end timestamp equal
+ // to frame end timestamp.
+ if (m_groupEndTimestamp.isInvalid() || frameEndTimestamp > m_groupEndTimestamp)
+ m_groupEndTimestamp = frameEndTimestamp;
+
+ // 1.22 If generate timestamps flag equals true, then set timestampOffset equal to frame end timestamp.
+ if (m_shouldGenerateTimestamps)
+ m_timestampOffset = frameEndTimestamp;
- m_buffered->add(presentationTimestamp.toDouble(), (presentationTimestamp + frameDuration + microsecond).toDouble());
+ // Eliminate small gaps between buffered ranges by coalescing
+ // disjoint ranges separated by less than a "fudge factor".
+ auto presentationEndTime = presentationTimestamp + frameDuration;
+ auto nearestToPresentationStartTime = trackBuffer.buffered.nearest(presentationTimestamp);
+ if (nearestToPresentationStartTime.isValid() && (presentationTimestamp - nearestToPresentationStartTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
+ presentationTimestamp = nearestToPresentationStartTime;
+
+ auto nearestToPresentationEndTime = trackBuffer.buffered.nearest(presentationEndTime);
+ if (nearestToPresentationEndTime.isValid() && (nearestToPresentationEndTime - presentationEndTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
+ presentationEndTime = nearestToPresentationEndTime;
+
+ trackBuffer.buffered.add(presentationTimestamp, presentationEndTime);
+ m_bufferedSinceLastMonitor += frameDuration.toDouble();
+ setBufferedDirty(true);
break;
} while (1);
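To see the gap-coalescing step above in isolation: a minimal standalone sketch, using doubles in place of MediaTime, a plain vector in place of PlatformTimeRanges, and an assumed fudge value (the names here are illustrative, not WebKit API).

#include <cmath>
#include <cstdio>
#include <vector>

struct Range { double start, end; };

// Stand-in for PlatformTimeRanges::nearest(): the buffered edge closest to t.
double nearestEdge(const std::vector<Range>& buffered, double t)
{
    double best = NAN;
    for (const Range& r : buffered) {
        if (std::isnan(best) || std::abs(r.start - t) < std::abs(best - t))
            best = r.start;
        if (std::abs(r.end - t) < std::abs(best - t))
            best = r.end;
    }
    return best;
}

int main()
{
    const double fudge = 1.0 / 24.0; // assumed; the real value is MediaSource::currentTimeFudgeFactor()
    std::vector<Range> buffered { { 0.0, 10.0 } };

    double pts = 10.02, end = pts + 0.04; // a frame starting 20ms past the buffered range
    double nearest = nearestEdge(buffered, pts);
    if (!std::isnan(nearest) && pts - nearest >= 0 && pts - nearest <= fudge)
        pts = nearest; // snap the start back onto the range, closing the sub-fudge gap
    buffered.push_back({ pts, end }); // PlatformTimeRanges::add() would merge the now-touching ranges
    std::printf("added [%g, %g]\n", pts, end);
}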
@@ -1006,35 +1641,45 @@ void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, Pas
// Steps 2-4 will be handled by MediaSource::monitorSourceBuffers()
// 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new
- // duration set to the maximum of the current duration and the highest end timestamp reported by HTMLMediaElement.buffered.
- if (highestPresentationEndTimestamp().toDouble() > m_source->duration())
- m_source->setDuration(highestPresentationEndTimestamp().toDouble(), IgnorableExceptionCode());
+ // duration set to the maximum of the current duration and the group end timestamp.
+ if (m_groupEndTimestamp > m_source->duration())
+ m_source->setDurationInternal(m_groupEndTimestamp);
}
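The erased-range bookkeeping above (erasedRanges.invert() followed by intersectWith()) is the set identity buffered = buffered ∩ complement(erased). A self-contained sketch of that identity over a toy range type, not the PlatformTimeRanges implementation:

#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

struct Range { double start, end; };

// Complement of a sorted, disjoint range list over (-inf, +inf).
std::vector<Range> invert(const std::vector<Range>& ranges)
{
    std::vector<Range> out;
    double previousEnd = -std::numeric_limits<double>::infinity();
    for (const Range& r : ranges) {
        if (r.start > previousEnd)
            out.push_back({ previousEnd, r.start });
        previousEnd = r.end;
    }
    out.push_back({ previousEnd, std::numeric_limits<double>::infinity() });
    return out;
}

// Intersection of two sorted, disjoint range lists (merge scan).
std::vector<Range> intersect(const std::vector<Range>& a, const std::vector<Range>& b)
{
    std::vector<Range> out;
    std::size_t i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
        double s = std::max(a[i].start, b[j].start);
        double e = std::min(a[i].end, b[j].end);
        if (s < e)
            out.push_back({ s, e });
        a[i].end < b[j].end ? ++i : ++j;
    }
    return out;
}

int main()
{
    std::vector<Range> buffered { { 0, 10 } }, erased { { 4, 6 } };
    for (const Range& r : intersect(buffered, invert(erased)))
        std::printf("[%g, %g] ", r.start, r.end); // prints: [0, 4] [6, 10]
    std::printf("\n");
}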
-bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const
+bool SourceBuffer::hasAudio() const
{
return m_audioTracks && m_audioTracks->length();
}
-bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const
+bool SourceBuffer::hasVideo() const
{
return m_videoTracks && m_videoTracks->length();
}
-void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
+bool SourceBuffer::sourceBufferPrivateHasAudio() const
+{
+ return hasAudio();
+}
+
+bool SourceBuffer::sourceBufferPrivateHasVideo() const
+{
+ return hasVideo();
+}
+
+void SourceBuffer::videoTrackSelectedChanged(VideoTrack& track)
{
// 2.4.5 Changes to selected/enabled track state
// If the selected video track changes, then run the following steps:
// 1. If the SourceBuffer associated with the previously selected video track is not associated with
// any other enabled tracks, run the following steps:
- if (track->selected()
+ if (!track.selected()
&& (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
&& (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
&& (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
// 1.1 Remove the SourceBuffer from activeSourceBuffers.
// 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
setActive(false);
- } else if (!track->selected()) {
+ } else if (track.selected()) {
// 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers,
// run the following steps:
// 2.1 Add the SourceBuffer to activeSourceBuffers.
@@ -1042,23 +1687,26 @@ void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
setActive(true);
}
+ if (m_videoTracks && m_videoTracks->contains(track))
+ m_videoTracks->scheduleChangeEvent();
+
if (!isRemoved())
m_source->mediaElement()->videoTrackSelectedChanged(track);
}
-void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
+void SourceBuffer::audioTrackEnabledChanged(AudioTrack& track)
{
// 2.4.5 Changes to selected/enabled track state
// If an audio track becomes disabled and the SourceBuffer associated with this track is not
// associated with any other enabled or selected track, then run the following steps:
- if (track->enabled()
+ if (!track.enabled()
&& (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
&& (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
&& (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
// 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
// 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
setActive(false);
- } else if (!track->enabled()) {
+ } else if (track.enabled()) {
// If an audio track becomes enabled and the SourceBuffer associated with this track is
// not already in activeSourceBuffers, then run the following steps:
// 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers
@@ -1066,16 +1714,19 @@ void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
setActive(true);
}
+ if (m_audioTracks && m_audioTracks->contains(track))
+ m_audioTracks->scheduleChangeEvent();
+
if (!isRemoved())
m_source->mediaElement()->audioTrackEnabledChanged(track);
}
-void SourceBuffer::textTrackModeChanged(TextTrack* track)
+void SourceBuffer::textTrackModeChanged(TextTrack& track)
{
// 2.4.5 Changes to selected/enabled track state
// If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not
// associated with any other enabled or selected track, then run the following steps:
- if (track->mode() == TextTrack::disabledKeyword()
+ if (track.mode() == TextTrack::Mode::Disabled
&& (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
&& (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
&& (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
@@ -1090,82 +1741,324 @@ void SourceBuffer::textTrackModeChanged(TextTrack* track)
setActive(true);
}
+ if (m_textTracks && m_textTracks->contains(track))
+ m_textTracks->scheduleChangeEvent();
+
if (!isRemoved())
m_source->mediaElement()->textTrackModeChanged(track);
}
-void SourceBuffer::textTrackAddCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
+void SourceBuffer::textTrackAddCue(TextTrack& track, TextTrackCue& cue)
{
if (!isRemoved())
m_source->mediaElement()->textTrackAddCue(track, cue);
}
-void SourceBuffer::textTrackAddCues(TextTrack* track, TextTrackCueList const* cueList)
+void SourceBuffer::textTrackAddCues(TextTrack& track, const TextTrackCueList& cueList)
{
if (!isRemoved())
m_source->mediaElement()->textTrackAddCues(track, cueList);
}
-void SourceBuffer::textTrackRemoveCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
+void SourceBuffer::textTrackRemoveCue(TextTrack& track, TextTrackCue& cue)
{
if (!isRemoved())
m_source->mediaElement()->textTrackRemoveCue(track, cue);
}
-void SourceBuffer::textTrackRemoveCues(TextTrack* track, TextTrackCueList const* cueList)
+void SourceBuffer::textTrackRemoveCues(TextTrack& track, const TextTrackCueList& cueList)
{
if (!isRemoved())
m_source->mediaElement()->textTrackRemoveCues(track, cueList);
}
-void SourceBuffer::textTrackKindChanged(TextTrack* track)
+void SourceBuffer::textTrackKindChanged(TextTrack& track)
{
if (!isRemoved())
m_source->mediaElement()->textTrackKindChanged(track);
}
-void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID)
+void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(const AtomicString& trackID)
{
- LOG(Media, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
+ LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
auto it = m_trackBufferMap.find(trackID);
if (it == m_trackBufferMap.end())
return;
- provideMediaData(it->value, trackID);
+ auto& trackBuffer = it->value;
+ if (!trackBuffer.needsReenqueueing && !m_source->isSeeking())
+ provideMediaData(trackBuffer, trackID);
}
-void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID)
+void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, const AtomicString& trackID)
{
+ if (m_source->isSeeking())
+ return;
+
#if !LOG_DISABLED
unsigned enqueuedSamples = 0;
#endif
- auto sampleIt = trackBuffer.decodeQueue.begin();
- for (auto sampleEnd = trackBuffer.decodeQueue.end(); sampleIt != sampleEnd; ++sampleIt) {
+ while (!trackBuffer.decodeQueue.empty()) {
if (!m_private->isReadyForMoreSamples(trackID)) {
m_private->notifyClientWhenReadyForMoreSamples(trackID);
break;
}
- RefPtr<MediaSample> sample = sampleIt->second;
+ // FIXME(rdar://problem/20635969): Remove this re-entrancy protection when the aforementioned radar is resolved; protecting
+        // against re-entrancy introduces a small inefficiency: appended samples are removed from the decode queue one
+        // at a time rather than all at once after they have been enqueued.
+ auto sample = trackBuffer.decodeQueue.begin()->second;
+ trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin());
+
+ // Do not enqueue samples spanning a significant unbuffered gap.
+ // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run
+ // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between
+ // enqueued samples allows for situations where we overrun the end of a buffered range
+        // but don't notice for 350ms of playback time, and the client can enqueue data for the
+ // new current time without triggering this early return.
+ // FIXME(135867): Make this gap detection logic less arbitrary.
+ MediaTime oneSecond(1, 1);
+ if (trackBuffer.lastEnqueuedDecodeEndTime.isValid() && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeEndTime > oneSecond)
+ break;
+
trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
- m_private->enqueueSample(sample.release(), trackID);
+ trackBuffer.lastEnqueuedDecodeEndTime = sample->decodeTime() + sample->duration();
+ m_private->enqueueSample(sample.releaseNonNull(), trackID);
#if !LOG_DISABLED
++enqueuedSamples;
#endif
+ }
+
+ LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
+}
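The decode queue drained above is keyed by a (decode timestamp, presentation timestamp) pair, so frames that share a decode time still drain in a deterministic order. A small sketch with std::map standing in for DecodeOrderSampleMap::MapType:

#include <cstdio>
#include <map>
#include <utility>

struct Sample { double dts, pts; };

int main()
{
    // Key mirrors DecodeOrderSampleMap::KeyType: ordered by DTS, then PTS.
    std::map<std::pair<double, double>, Sample> decodeQueue;
    const Sample samples[] = { { 0.0, 0.1 }, { 0.0, 0.0 }, { 0.1, 0.2 } };
    for (const Sample& s : samples)
        decodeQueue.emplace(std::make_pair(s.dts, s.pts), s);

    // Drain front-to-back, one sample at a time, as provideMediaData() does.
    while (!decodeQueue.empty()) {
        auto it = decodeQueue.begin();
        std::printf("enqueue dts=%g pts=%g\n", it->second.dts, it->second.pts);
        decodeQueue.erase(it);
    }
}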
+
+void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, const AtomicString& trackID, const MediaTime& time)
+{
+ m_private->flush(trackID);
+ trackBuffer.decodeQueue.clear();
+ // Find the sample which contains the current presentation time.
+ auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
+
+ if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end())
+ currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(time);
+
+ if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()
+ || (currentSamplePTSIterator->first - time) > MediaSource::currentTimeFudgeFactor())
+ return;
+
+    // Search backward for the previous sync sample.
+ DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
+ auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
+ ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());
+
+ auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
+ auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
+ if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend())
+ return;
+
+ // Fill the decode queue with the non-displaying samples.
+ for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter) {
+ auto copy = iter->second->createNonDisplayingCopy();
+ DecodeOrderSampleMap::KeyType decodeKey(copy->decodeTime(), copy->presentationTime());
+ trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, WTFMove(copy)));
+ }
+
+ if (!trackBuffer.decodeQueue.empty()) {
+ auto& lastSample = trackBuffer.decodeQueue.rbegin()->second;
+ trackBuffer.lastEnqueuedPresentationTime = lastSample->presentationTime();
+ trackBuffer.lastEnqueuedDecodeEndTime = lastSample->decodeTime();
+ } else {
+ trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime();
+ trackBuffer.lastEnqueuedDecodeEndTime = MediaTime::invalidTime();
}
- trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin(), sampleIt);
- LOG(Media, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
+ // Fill the decode queue with the remaining samples.
+ for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
+ trackBuffer.decodeQueue.insert(*iter);
+ provideMediaData(trackBuffer, trackID);
+
+ trackBuffer.needsReenqueueing = false;
}
+
void SourceBuffer::didDropSample()
{
if (!isRemoved())
m_source->mediaElement()->incrementDroppedFrameCount();
}
+void SourceBuffer::monitorBufferingRate()
+{
+ double now = monotonicallyIncreasingTime();
+ double interval = now - m_timeOfBufferingMonitor;
+ double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval;
+
+ m_timeOfBufferingMonitor = now;
+ m_bufferedSinceLastMonitor = 0;
+
+ m_averageBufferRate += (interval * ExponentialMovingAverageCoefficient) * (rateSinceLastMonitor - m_averageBufferRate);
+
+ LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_avegareBufferRate: %lf", this, m_averageBufferRate);
+}
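monitorBufferingRate() keeps an interval-weighted exponential moving average: each measurement pulls the average toward the instantaneous rate by interval * coefficient. A standalone sketch; the coefficient value is assumed (ExponentialMovingAverageCoefficient is defined earlier in SourceBuffer.cpp):

#include <cstdio>

int main()
{
    const double coefficient = 0.1; // assumed stand-in for ExponentialMovingAverageCoefficient
    double average = 0;
    // { wall-clock interval in seconds, seconds of media buffered during it }
    const double observations[][2] = { { 1.0, 2.0 }, { 1.0, 1.0 }, { 0.5, 0.2 } };
    for (const auto& o : observations) {
        double interval = o[0];
        double rate = o[1] / interval; // media seconds per wall-clock second
        average += (interval * coefficient) * (rate - average);
        std::printf("rate=%g average=%g\n", rate, average);
    }
}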
+
+void SourceBuffer::updateBufferedFromTrackBuffers()
+{
+ // 3.1 Attributes, buffered
+ // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-buffered
+
+ // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object.
+ MediaTime highestEndTime = MediaTime::negativeInfiniteTime();
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
+ if (!trackBuffer.buffered.length())
+ continue;
+ highestEndTime = std::max(highestEndTime, trackBuffer.buffered.maximumBufferedTime());
+ }
+
+ // NOTE: Short circuit the following if none of the TrackBuffers have buffered ranges to avoid generating
+ // a single range of {0, 0}.
+ if (highestEndTime.isNegativeInfinite()) {
+ m_buffered->ranges() = PlatformTimeRanges();
+ return;
+ }
+
+ // 3. Let intersection ranges equal a TimeRange object containing a single range from 0 to highest end time.
+ PlatformTimeRanges intersectionRanges { MediaTime::zeroTime(), highestEndTime };
+
+ // 4. For each audio and video track buffer managed by this SourceBuffer, run the following steps:
+ for (auto& trackBuffer : m_trackBufferMap.values()) {
+ // 4.1 Let track ranges equal the track buffer ranges for the current track buffer.
+ PlatformTimeRanges trackRanges = trackBuffer.buffered;
+ // 4.2 If readyState is "ended", then set the end time on the last range in track ranges to highest end time.
+ if (m_source->isEnded())
+ trackRanges.add(trackRanges.maximumBufferedTime(), highestEndTime);
+
+ // 4.3 Let new intersection ranges equal the intersection between the intersection ranges and the track ranges.
+ // 4.4 Replace the ranges in intersection ranges with the new intersection ranges.
+ intersectionRanges.intersectWith(trackRanges);
+ }
+
+ // 5. If intersection ranges does not contain the exact same range information as the current value of this attribute,
+ // then update the current value of this attribute to intersection ranges.
+ m_buffered->ranges() = intersectionRanges;
+ setBufferedDirty(true);
+}
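Numerically, steps 2-5 above reduce to a min/max over per-track end times when each track is buffered from 0. A compact illustration, including the "ended" extension of step 4.2:

#include <algorithm>
#include <cstdio>

int main()
{
    const double trackEnds[] = { 8.0, 10.0, 9.5 }; // per-track buffered end times
    double highestEnd = 0;
    for (double e : trackEnds)
        highestEnd = std::max(highestEnd, e);

    const bool endedStates[] = { false, true };
    for (bool ended : endedStates) {
        // When readyState is "ended", each track is first extended to the
        // highest end time, so the intersection reaches highestEnd; otherwise
        // the intersection stops at the shortest track.
        double intersectionEnd = highestEnd;
        for (double e : trackEnds)
            intersectionEnd = std::min(intersectionEnd, ended ? highestEnd : e);
        std::printf("ended=%d -> buffered = [0, %g]\n", ended, intersectionEnd);
    }
}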
+
+bool SourceBuffer::canPlayThroughRange(PlatformTimeRanges& ranges)
+{
+ if (isRemoved())
+ return false;
+
+ monitorBufferingRate();
+
+ // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater
+ // means indefinite playback. This could be improved by taking jitter into account.
+ if (m_averageBufferRate > 1)
+ return true;
+
+ // Add up all the time yet to be buffered.
+ MediaTime currentTime = m_source->currentTime();
+ MediaTime duration = m_source->duration();
+
+ PlatformTimeRanges unbufferedRanges = ranges;
+ unbufferedRanges.invert();
+ unbufferedRanges.intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));
+ MediaTime unbufferedTime = unbufferedRanges.totalDuration();
+ if (!unbufferedTime.isValid())
+ return true;
+
+ MediaTime timeRemaining = duration - currentTime;
+ return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
+}
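The play-through test above is plain arithmetic: with a stable buffering rate, the remaining unbuffered media must download faster than playback reaches it. A worked example with assumed numbers:

#include <cstdio>

int main()
{
    double duration = 60, currentTime = 10;
    double unbufferedTime = 20;     // seconds of media still to fetch before duration
    double averageBufferRate = 0.5; // media seconds buffered per wall-clock second

    double timeRemaining = duration - currentTime;
    bool canPlayThrough = averageBufferRate > 1
        || unbufferedTime / averageBufferRate < timeRemaining;
    std::printf("canPlayThrough=%d (%g s of download vs %g s of playback)\n",
        canPlayThrough, unbufferedTime / averageBufferRate, timeRemaining);
}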
+
+size_t SourceBuffer::extraMemoryCost() const
+{
+ size_t extraMemoryCost = m_pendingAppendData.capacity();
+ for (auto& trackBuffer : m_trackBufferMap.values())
+ extraMemoryCost += trackBuffer.samples.sizeInBytes();
+
+ return extraMemoryCost;
+}
+
+void SourceBuffer::reportExtraMemoryAllocated()
+{
+ size_t extraMemoryCost = this->extraMemoryCost();
+ if (extraMemoryCost <= m_reportedExtraMemoryCost)
+ return;
+
+ size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
+ m_reportedExtraMemoryCost = extraMemoryCost;
+
+ JSC::JSLockHolder lock(scriptExecutionContext()->vm());
+ // FIXME: Adopt reportExtraMemoryVisited, and switch to reportExtraMemoryAllocated.
+ // https://bugs.webkit.org/show_bug.cgi?id=142595
+ scriptExecutionContext()->vm().heap.deprecatedReportExtraMemory(extraMemoryCostDelta);
+}
+
+Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
+{
+ auto it = m_trackBufferMap.find(trackID);
+ if (it == m_trackBufferMap.end())
+ return Vector<String>();
+
+ TrackBuffer& trackBuffer = it->value;
+ Vector<String> sampleDescriptions;
+ for (auto& pair : trackBuffer.samples.decodeOrder())
+ sampleDescriptions.append(toString(*pair.second));
+
+ return sampleDescriptions;
+}
+
+Vector<String> SourceBuffer::enqueuedSamplesForTrackID(const AtomicString& trackID)
+{
+ return m_private->enqueuedSamplesForTrackID(trackID);
+}
+
+Document& SourceBuffer::document() const
+{
+ ASSERT(scriptExecutionContext());
+ return downcast<Document>(*scriptExecutionContext());
+}
+
+ExceptionOr<void> SourceBuffer::setMode(AppendMode newMode)
+{
+ // 3.1 Attributes - mode
+ // http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode
+
+ // On setting, run the following steps:
+
+ // 1. Let new mode equal the new value being assigned to this attribute.
+ // 2. If generate timestamps flag equals true and new mode equals "segments", then throw an INVALID_ACCESS_ERR exception and abort these steps.
+ if (m_shouldGenerateTimestamps && newMode == AppendMode::Segments)
+ return Exception { INVALID_ACCESS_ERR };
+
+ // 3. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an INVALID_STATE_ERR exception and abort these steps.
+ // 4. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
+ if (isRemoved() || m_updating)
+ return Exception { INVALID_STATE_ERR };
+
+ // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
+ if (m_source->isEnded()) {
+ // 5.1. Set the readyState attribute of the parent media source to "open"
+ // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source.
+ m_source->openIfInEndedState();
+ }
+
+ // 6. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
+ if (m_appendState == ParsingMediaSegment)
+ return Exception { INVALID_STATE_ERR };
+
+ // 7. If the new mode equals "sequence", then set the group start timestamp to the group end timestamp.
+ if (newMode == AppendMode::Sequence)
+ m_groupStartTimestamp = m_groupEndTimestamp;
+
+ // 8. Update the attribute to new mode.
+ m_mode = newMode;
+
+ return { };
+}
+
} // namespace WebCore
#endif
diff --git a/Source/WebCore/Modules/mediasource/SourceBuffer.h b/Source/WebCore/Modules/mediasource/SourceBuffer.h
index 163405045..5a142d4ca 100644
--- a/Source/WebCore/Modules/mediasource/SourceBuffer.h
+++ b/Source/WebCore/Modules/mediasource/SourceBuffer.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -28,130 +29,167 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SourceBuffer_h
-#define SourceBuffer_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
#include "ActiveDOMObject.h"
#include "AudioTrack.h"
#include "EventTarget.h"
-#include "ExceptionCode.h"
+#include "ExceptionOr.h"
#include "GenericEventQueue.h"
-#include "ScriptWrappable.h"
#include "SourceBufferPrivateClient.h"
#include "TextTrack.h"
#include "Timer.h"
#include "VideoTrack.h"
-#include <runtime/ArrayBufferView.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/text/WTFString.h>
namespace WebCore {
class AudioTrackList;
+class BufferSource;
class MediaSource;
+class PlatformTimeRanges;
class SourceBufferPrivate;
class TextTrackList;
class TimeRanges;
class VideoTrackList;
-class SourceBuffer final : public RefCounted<SourceBuffer>, public ActiveDOMObject, public EventTargetWithInlineData, public ScriptWrappable, public SourceBufferPrivateClient, public AudioTrackClient, public VideoTrackClient, public TextTrackClient {
+class SourceBuffer final : public RefCounted<SourceBuffer>, public ActiveDOMObject, public EventTargetWithInlineData, private SourceBufferPrivateClient, private AudioTrackClient, private VideoTrackClient, private TextTrackClient {
public:
- static PassRef<SourceBuffer> create(PassRef<SourceBufferPrivate>, MediaSource*);
-
+ static Ref<SourceBuffer> create(Ref<SourceBufferPrivate>&&, MediaSource*);
virtual ~SourceBuffer();
- // SourceBuffer.idl methods
bool updating() const { return m_updating; }
- PassRefPtr<TimeRanges> buffered(ExceptionCode&) const;
- const RefPtr<TimeRanges>& buffered() const;
+ ExceptionOr<Ref<TimeRanges>> buffered() const;
double timestampOffset() const;
- void setTimestampOffset(double, ExceptionCode&);
- void appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode&);
- void appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode&);
- void abort(ExceptionCode&);
+ ExceptionOr<void> setTimestampOffset(double);
+
+#if ENABLE(VIDEO_TRACK)
+ VideoTrackList& videoTracks();
+ AudioTrackList& audioTracks();
+ TextTrackList& textTracks();
+#endif
+
+ double appendWindowStart() const;
+ ExceptionOr<void> setAppendWindowStart(double);
+ double appendWindowEnd() const;
+ ExceptionOr<void> setAppendWindowEnd(double);
+
+ ExceptionOr<void> appendBuffer(const BufferSource&);
+ ExceptionOr<void> abort();
+ ExceptionOr<void> remove(double start, double end);
+ ExceptionOr<void> remove(const MediaTime&, const MediaTime&);
+
+ const TimeRanges& bufferedInternal() const { ASSERT(m_buffered); return *m_buffered; }
void abortIfUpdating();
void removedFromMediaSource();
- const MediaTime& highestPresentationEndTimestamp() const { return m_highestPresentationEndTimestamp; }
+ void seekToTime(const MediaTime&);
-#if ENABLE(VIDEO_TRACK)
- VideoTrackList* videoTracks();
- AudioTrackList* audioTracks();
- TextTrackList* textTracks();
-#endif
+ bool canPlayThroughRange(PlatformTimeRanges&);
+
+ bool hasVideo() const;
+
+ bool active() const { return m_active; }
+
+ ScriptExecutionContext* scriptExecutionContext() const final { return ActiveDOMObject::scriptExecutionContext(); }
+
+ using RefCounted::ref;
+ using RefCounted::deref;
+
+ struct TrackBuffer;
+
+ Document& document() const;
+
+ enum class AppendMode { Segments, Sequence };
+ AppendMode mode() const { return m_mode; }
+ ExceptionOr<void> setMode(AppendMode);
- // ActiveDOMObject interface
- virtual bool hasPendingActivity() const override;
- virtual void stop() override;
+ void setShouldGenerateTimestamps(bool flag) { m_shouldGenerateTimestamps = flag; }
- // EventTarget interface
- virtual ScriptExecutionContext* scriptExecutionContext() const override { return ActiveDOMObject::scriptExecutionContext(); }
- virtual EventTargetInterface eventTargetInterface() const override { return SourceBufferEventTargetInterfaceType; }
+ bool isBufferedDirty() const { return m_bufferedDirty; }
+ void setBufferedDirty(bool flag) { m_bufferedDirty = flag; }
- using RefCounted<SourceBuffer>::ref;
- using RefCounted<SourceBuffer>::deref;
+ MediaTime highestPresentationTimestamp() const;
+ void readyStateChanged();
-protected:
- // EventTarget interface
- virtual void refEventTarget() override { ref(); }
- virtual void derefEventTarget() override { deref(); }
+ bool hasPendingActivity() const final;
private:
- SourceBuffer(PassRef<SourceBufferPrivate>, MediaSource*);
+ SourceBuffer(Ref<SourceBufferPrivate>&&, MediaSource*);
- // SourceBufferPrivateClient
- virtual void sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString&) override;
- virtual void sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment&) override;
- virtual void sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample>) override;
- virtual bool sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const override;
- virtual bool sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const override;
- virtual void sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID) override;
- virtual void sourceBufferPrivateSeekToTime(SourceBufferPrivate*, const MediaTime&);
- virtual MediaTime sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime&, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold);
+ void refEventTarget() final { ref(); }
+ void derefEventTarget() final { deref(); }
+ void stop() final;
+ const char* activeDOMObjectName() const final;
+ bool canSuspendForDocumentSuspension() const final;
- // AudioTrackClient
- virtual void audioTrackEnabledChanged(AudioTrack*) override;
+ void sourceBufferPrivateDidReceiveInitializationSegment(const InitializationSegment&) final;
+ void sourceBufferPrivateDidReceiveSample(MediaSample&) final;
+ bool sourceBufferPrivateHasAudio() const final;
+ bool sourceBufferPrivateHasVideo() const final;
+ void sourceBufferPrivateDidBecomeReadyForMoreSamples(const AtomicString& trackID) final;
+ MediaTime sourceBufferPrivateFastSeekTimeForMediaTime(const MediaTime&, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold) final;
+ void sourceBufferPrivateAppendComplete(AppendResult) final;
+ void sourceBufferPrivateDidReceiveRenderingError(int errorCode) final;
- // VideoTrackClient
- virtual void videoTrackSelectedChanged(VideoTrack*) override;
+ void audioTrackEnabledChanged(AudioTrack&) final;
+ void videoTrackSelectedChanged(VideoTrack&) final;
- // TextTrackClient
- virtual void textTrackKindChanged(TextTrack*) override;
- virtual void textTrackModeChanged(TextTrack*) override;
- virtual void textTrackAddCues(TextTrack*, const TextTrackCueList*) override;
- virtual void textTrackRemoveCues(TextTrack*, const TextTrackCueList*) override;
- virtual void textTrackAddCue(TextTrack*, PassRefPtr<TextTrackCue>) override;
- virtual void textTrackRemoveCue(TextTrack*, PassRefPtr<TextTrackCue>) override;
+ void textTrackKindChanged(TextTrack&) final;
+ void textTrackModeChanged(TextTrack&) final;
+ void textTrackAddCues(TextTrack&, const TextTrackCueList&) final;
+ void textTrackRemoveCues(TextTrack&, const TextTrackCueList&) final;
+ void textTrackAddCue(TextTrack&, TextTrackCue&) final;
+ void textTrackRemoveCue(TextTrack&, TextTrackCue&) final;
- static const WTF::AtomicString& decodeError();
- static const WTF::AtomicString& networkError();
+ EventTargetInterface eventTargetInterface() const final { return SourceBufferEventTargetInterfaceType; }
bool isRemoved() const;
void scheduleEvent(const AtomicString& eventName);
- void appendBufferInternal(unsigned char*, unsigned, ExceptionCode&);
- void appendBufferTimerFired(Timer<SourceBuffer>&);
+ ExceptionOr<void> appendBufferInternal(const unsigned char*, unsigned);
+ void appendBufferTimerFired();
+ void resetParserState();
void setActive(bool);
bool validateInitializationSegment(const InitializationSegment&);
- struct TrackBuffer;
- void provideMediaData(TrackBuffer&, AtomicString trackID);
+ void reenqueueMediaForTime(TrackBuffer&, const AtomicString& trackID, const MediaTime&);
+ void provideMediaData(TrackBuffer&, const AtomicString& trackID);
void didDropSample();
+ void evictCodedFrames(size_t newDataSize);
+ size_t maximumBufferSize() const;
+
+ void monitorBufferingRate();
+
+ void removeTimerFired();
+ void removeCodedFrames(const MediaTime& start, const MediaTime& end);
+
+ size_t extraMemoryCost() const;
+ void reportExtraMemoryAllocated();
+
+ void updateBufferedFromTrackBuffers();
+
+ void appendError(bool);
+
+ bool hasAudio() const;
+
+ void rangeRemoval(const MediaTime&, const MediaTime&);
- RefPtr<SourceBufferPrivate> m_private;
+ friend class Internals;
+ WEBCORE_EXPORT Vector<String> bufferedSamplesForTrackID(const AtomicString&);
+ WEBCORE_EXPORT Vector<String> enqueuedSamplesForTrackID(const AtomicString&);
+
+ Ref<SourceBufferPrivate> m_private;
MediaSource* m_source;
GenericEventQueue m_asyncEventQueue;
-
- bool m_updating;
+ AppendMode m_mode { AppendMode::Segments };
Vector<unsigned char> m_pendingAppendData;
- Timer<SourceBuffer> m_appendBufferTimer;
+ Timer m_appendBufferTimer;
RefPtr<VideoTrackList> m_videoTracks;
RefPtr<AudioTrackList> m_audioTracks;
@@ -162,20 +200,36 @@ private:
Vector<AtomicString> m_textCodecs;
MediaTime m_timestampOffset;
- MediaTime m_highestPresentationEndTimestamp;
+ MediaTime m_appendWindowStart;
+ MediaTime m_appendWindowEnd;
+
+ MediaTime m_groupStartTimestamp;
+ MediaTime m_groupEndTimestamp;
HashMap<AtomicString, TrackBuffer> m_trackBufferMap;
- bool m_receivedFirstInitializationSegment;
RefPtr<TimeRanges> m_buffered;
- bool m_active;
+ bool m_bufferedDirty { true };
enum AppendStateType { WaitingForSegment, ParsingInitSegment, ParsingMediaSegment };
AppendStateType m_appendState;
+ double m_timeOfBufferingMonitor;
+ double m_bufferedSinceLastMonitor { 0 };
+ double m_averageBufferRate { 0 };
+
+ size_t m_reportedExtraMemoryCost { 0 };
+
+ MediaTime m_pendingRemoveStart;
+ MediaTime m_pendingRemoveEnd;
+ Timer m_removeTimer;
+
+ bool m_updating { false };
+ bool m_receivedFirstInitializationSegment { false };
+ bool m_active { false };
+ bool m_bufferFull { false };
+ bool m_shouldGenerateTimestamps { false };
};
} // namespace WebCore
#endif
-
-#endif
diff --git a/Source/WebCore/Modules/mediasource/SourceBuffer.idl b/Source/WebCore/Modules/mediasource/SourceBuffer.idl
index f13c6fcd3..9c847c72a 100644
--- a/Source/WebCore/Modules/mediasource/SourceBuffer.idl
+++ b/Source/WebCore/Modules/mediasource/SourceBuffer.idl
@@ -28,33 +28,44 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+[Conditional=MEDIA_SOURCE] enum AppendMode {
+ "segments",
+ "sequence"
+};
+
[
- Conditional=MEDIA_SOURCE,
- NoInterfaceObject,
ActiveDOMObject,
- EventTarget,
- JSGenerateToJSObject,
- JSGenerateToNativeObject,
+ Conditional=MEDIA_SOURCE,
+ ExportMacro=WEBCORE_EXPORT,
] interface SourceBuffer : EventTarget {
+ [SetterMayThrowException] attribute AppendMode mode;
readonly attribute boolean updating;
// Returns the time ranges buffered.
- [GetterRaisesException] readonly attribute TimeRanges buffered;
+ [GetterMayThrowException] readonly attribute TimeRanges buffered;
// Applies an offset to media segment timestamps.
- [SetterRaisesException] attribute double timestampOffset;
+ [SetterMayThrowException] attribute double timestampOffset;
- // Append segment data.
- [RaisesException] void appendBuffer(ArrayBuffer data);
- [RaisesException] void appendBuffer(ArrayBufferView data);
-
- // Abort the current segment append sequence.
- [RaisesException] void abort();
-
// Track support
[Conditional=VIDEO_TRACK] readonly attribute AudioTrackList audioTracks;
[Conditional=VIDEO_TRACK] readonly attribute VideoTrackList videoTracks;
[Conditional=VIDEO_TRACK] readonly attribute TextTrackList textTracks;
-};
+ [SetterMayThrowException] attribute double appendWindowStart;
+ [SetterMayThrowException] attribute unrestricted double appendWindowEnd;
+
+ // Append segment data.
+ [MayThrowException] void appendBuffer(BufferSource data);
+
+ // Abort the current segment append sequence.
+ [MayThrowException] void abort();
+ [MayThrowException] void remove(unrestricted double start, unrestricted double end);
+
+ attribute EventHandler onupdatestart;
+ attribute EventHandler onupdate;
+ attribute EventHandler onupdateend;
+ attribute EventHandler onerror;
+ attribute EventHandler onabort;
+};
diff --git a/Source/WebCore/Modules/mediasource/SourceBufferList.cpp b/Source/WebCore/Modules/mediasource/SourceBufferList.cpp
index 2dd7b81f0..005973709 100644
--- a/Source/WebCore/Modules/mediasource/SourceBufferList.cpp
+++ b/Source/WebCore/Modules/mediasource/SourceBufferList.cpp
@@ -34,6 +34,7 @@
#if ENABLE(MEDIA_SOURCE)
#include "Event.h"
+#include "EventNames.h"
#include "SourceBuffer.h"
namespace WebCore {
@@ -49,15 +50,15 @@ SourceBufferList::~SourceBufferList()
ASSERT(m_list.isEmpty());
}
-void SourceBufferList::add(PassRefPtr<SourceBuffer> buffer)
+void SourceBufferList::add(Ref<SourceBuffer>&& buffer)
{
- m_list.append(buffer);
+ m_list.append(WTFMove(buffer));
scheduleEvent(eventNames().addsourcebufferEvent);
}
-void SourceBufferList::remove(SourceBuffer* buffer)
+void SourceBufferList::remove(SourceBuffer& buffer)
{
- size_t index = m_list.find(buffer);
+ size_t index = m_list.find(&buffer);
if (index == notFound)
return;
m_list.remove(index);
@@ -70,12 +71,30 @@ void SourceBufferList::clear()
scheduleEvent(eventNames().removesourcebufferEvent);
}
+void SourceBufferList::swap(Vector<RefPtr<SourceBuffer>>& other)
+{
+ int changeInSize = other.size() - m_list.size();
+ int addedEntries = 0;
+ for (auto& sourceBuffer : other) {
+ if (!m_list.contains(sourceBuffer))
+ ++addedEntries;
+ }
+ int removedEntries = addedEntries - changeInSize;
+
+ m_list.swap(other);
+
+ if (addedEntries)
+ scheduleEvent(eventNames().addsourcebufferEvent);
+ if (removedEntries)
+ scheduleEvent(eventNames().removesourcebufferEvent);
+}
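The event accounting in swap() derives removals from the size delta: removedEntries = addedEntries - (newSize - oldSize). A worked example:

#include <cstdio>

int main()
{
    // Old list {A, B, C} swapped with new list {B, D}: D added; A and C removed.
    int oldSize = 3, newSize = 2;
    int addedEntries = 1; // entries of the new list missing from the old one
    int changeInSize = newSize - oldSize;             // -1
    int removedEntries = addedEntries - changeInSize; // 1 - (-1) = 2
    std::printf("added=%d removed=%d\n", addedEntries, removedEntries);
}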
+
void SourceBufferList::scheduleEvent(const AtomicString& eventName)
{
- RefPtr<Event> event = Event::create(eventName, false, false);
+ auto event = Event::create(eventName, false, false);
event->setTarget(this);
- m_asyncEventQueue.enqueueEvent(event.release());
+ m_asyncEventQueue.enqueueEvent(WTFMove(event));
}
diff --git a/Source/WebCore/Modules/mediasource/SourceBufferList.h b/Source/WebCore/Modules/mediasource/SourceBufferList.h
index fa237b385..fc47f05fe 100644
--- a/Source/WebCore/Modules/mediasource/SourceBufferList.h
+++ b/Source/WebCore/Modules/mediasource/SourceBufferList.h
@@ -28,8 +28,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SourceBufferList_h
-#define SourceBufferList_h
+#pragma once
#if ENABLE(MEDIA_SOURCE)
@@ -43,28 +42,29 @@ namespace WebCore {
class SourceBuffer;
-class SourceBufferList final : public RefCounted<SourceBufferList>, public ScriptWrappable, public EventTargetWithInlineData {
+class SourceBufferList final : public RefCounted<SourceBufferList>, public EventTargetWithInlineData {
public:
- static PassRefPtr<SourceBufferList> create(ScriptExecutionContext* context)
+ static Ref<SourceBufferList> create(ScriptExecutionContext* context)
{
- return adoptRef(new SourceBufferList(context));
+ return adoptRef(*new SourceBufferList(context));
}
virtual ~SourceBufferList();
unsigned long length() const { return m_list.size(); }
- SourceBuffer* item(unsigned long index) const { return (index < m_list.size()) ? m_list[index].get() : 0; }
+ SourceBuffer* item(unsigned long index) const { return (index < m_list.size()) ? m_list[index].get() : nullptr; }
- void add(PassRefPtr<SourceBuffer>);
- void remove(SourceBuffer*);
- bool contains(SourceBuffer* buffer) { return m_list.find(buffer) != notFound; }
+ void add(Ref<SourceBuffer>&&);
+ void remove(SourceBuffer&);
+ bool contains(SourceBuffer& buffer) { return m_list.find(&buffer) != notFound; }
void clear();
+ void swap(Vector<RefPtr<SourceBuffer>>&);
Vector<RefPtr<SourceBuffer>>::iterator begin() { return m_list.begin(); }
Vector<RefPtr<SourceBuffer>>::iterator end() { return m_list.end(); }
// EventTarget interface
- virtual EventTargetInterface eventTargetInterface() const override { return SourceBufferListEventTargetInterfaceType; }
- virtual ScriptExecutionContext* scriptExecutionContext() const override { return m_scriptExecutionContext; }
+ EventTargetInterface eventTargetInterface() const override { return SourceBufferListEventTargetInterfaceType; }
+ ScriptExecutionContext* scriptExecutionContext() const override { return m_scriptExecutionContext; }
using RefCounted<SourceBufferList>::ref;
using RefCounted<SourceBufferList>::deref;
@@ -74,8 +74,8 @@ private:
void scheduleEvent(const AtomicString&);
- virtual void refEventTarget() override { ref(); }
- virtual void derefEventTarget() override { deref(); }
+ void refEventTarget() override { ref(); }
+ void derefEventTarget() override { deref(); }
ScriptExecutionContext* m_scriptExecutionContext;
GenericEventQueue m_asyncEventQueue;
@@ -85,6 +85,4 @@ private:
} // namespace WebCore
-#endif
-
-#endif
+#endif // ENABLE(MEDIA_SOURCE)
diff --git a/Source/WebCore/Modules/mediasource/SourceBufferList.idl b/Source/WebCore/Modules/mediasource/SourceBufferList.idl
index d3b36df25..f82ab6ca2 100644
--- a/Source/WebCore/Modules/mediasource/SourceBufferList.idl
+++ b/Source/WebCore/Modules/mediasource/SourceBufferList.idl
@@ -30,14 +30,13 @@
[
Conditional=MEDIA_SOURCE,
- NoInterfaceObject,
- EventTarget,
- JSGenerateToJSObject,
- JSGenerateToNativeObject,
GenerateIsReachable=Impl,
CallWith=ScriptExecutionContext,
] interface SourceBufferList : EventTarget {
readonly attribute unsigned long length;
getter SourceBuffer item(unsigned long index);
+
+ attribute EventHandler onaddsourcebuffer;
+ attribute EventHandler onremovesourcebuffer;
};
diff --git a/Source/WebCore/Modules/mediasource/TextTrackMediaSource.h b/Source/WebCore/Modules/mediasource/TextTrackMediaSource.h
new file mode 100644
index 000000000..ad21acff0
--- /dev/null
+++ b/Source/WebCore/Modules/mediasource/TextTrackMediaSource.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
+
+#include "TextTrack.h"
+
+namespace WebCore {
+
+class SourceBuffer;
+
+class TextTrackMediaSource {
+public:
+ static SourceBuffer* sourceBuffer(TextTrack& track) { return track.sourceBuffer(); }
+};
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
diff --git a/Source/WebCore/Modules/mediasource/TextTrackMediaSource.idl b/Source/WebCore/Modules/mediasource/TextTrackMediaSource.idl
index 567dd3506..88ed22f31 100644
--- a/Source/WebCore/Modules/mediasource/TextTrackMediaSource.idl
+++ b/Source/WebCore/Modules/mediasource/TextTrackMediaSource.idl
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
diff --git a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.cpp b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.cpp
index f7a5cf83c..f19561f30 100644
--- a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.cpp
+++ b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.cpp
@@ -28,9 +28,9 @@
namespace WebCore {
-RefPtr<VideoPlaybackQuality> VideoPlaybackQuality::create(double creationTime, unsigned long totalVideoFrames, unsigned long droppedVideoFrames, unsigned long corruptedVideoFrames, double totalFrameDelay)
+Ref<VideoPlaybackQuality> VideoPlaybackQuality::create(double creationTime, unsigned long totalVideoFrames, unsigned long droppedVideoFrames, unsigned long corruptedVideoFrames, double totalFrameDelay)
{
- return adoptRef(new VideoPlaybackQuality(creationTime, totalVideoFrames, droppedVideoFrames, corruptedVideoFrames, totalFrameDelay));
+ return adoptRef(*new VideoPlaybackQuality(creationTime, totalVideoFrames, droppedVideoFrames, corruptedVideoFrames, totalFrameDelay));
}
VideoPlaybackQuality::VideoPlaybackQuality(double creationTime, unsigned long totalVideoFrames, unsigned long droppedVideoFrames, unsigned long corruptedVideoFrames, double totalFrameDelay)
diff --git a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.h b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.h
index 5b89377b7..2fff6d35f 100644
--- a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.h
+++ b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef VideoPlaybackQuality_h
-#define VideoPlaybackQuality_h
+#pragma once
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
@@ -34,7 +33,7 @@ namespace WebCore {
class VideoPlaybackQuality : public RefCounted<VideoPlaybackQuality> {
WTF_MAKE_NONCOPYABLE(VideoPlaybackQuality)
public:
- static RefPtr<VideoPlaybackQuality> create(double creationTime, unsigned long totalVideoFrames, unsigned long droppedVideoFrames, unsigned long corruptedVideoFrames, double totalFrameDelay);
+ static Ref<VideoPlaybackQuality> create(double creationTime, unsigned long totalVideoFrames, unsigned long droppedVideoFrames, unsigned long corruptedVideoFrames, double totalFrameDelay);
double creationTime() const { return m_creationTime; }
unsigned long totalVideoFrames() const { return m_totalVideoFrames; }
@@ -52,6 +51,4 @@ protected:
double m_totalFrameDelay;
};
-}
-
-#endif // VideoPlaybackQuality_h
+} // namespace WebCore
diff --git a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.idl b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.idl
index 65c619301..e78d0eb96 100644
--- a/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.idl
+++ b/Source/WebCore/Modules/mediasource/VideoPlaybackQuality.idl
@@ -28,11 +28,11 @@
NoInterfaceObject,
ImplementationLacksVTable,
] interface VideoPlaybackQuality {
- readonly attribute double creationTime;
+ readonly attribute unrestricted double creationTime;
readonly attribute unsigned long totalVideoFrames;
readonly attribute unsigned long droppedVideoFrames;
readonly attribute unsigned long corruptedVideoFrames;
- readonly attribute double totalFrameDelay;
+ readonly attribute unrestricted double totalFrameDelay;
};
diff --git a/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.h b/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.h
new file mode 100644
index 000000000..8e0f5f43e
--- /dev/null
+++ b/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
+
+#include "VideoTrack.h"
+
+namespace WebCore {
+
+class VideoTrackMediaSource {
+public:
+ static SourceBuffer* sourceBuffer(VideoTrack& track) { return track.sourceBuffer(); }
+};
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_SOURCE) && ENABLE(VIDEO_TRACK)
diff --git a/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.idl b/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.idl
index 4866a28b0..0f7c65b25 100644
--- a/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.idl
+++ b/Source/WebCore/Modules/mediasource/VideoTrackMediaSource.idl
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR