Diffstat (limited to 'chromium/third_party/blink/renderer/modules/webaudio')
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc | 10
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc | 64
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h | 28
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc | 36
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc | 84
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_context.h | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_context_autoplay_test.cc | 40
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc | 49
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h | 19
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc | 38
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h | 20
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc | 63
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node.h | 37
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc | 37
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc | 14
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc | 16
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc | 27
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc | 54
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h | 5
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc | 19
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc | 34
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc | 10
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc | 14
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc | 13
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h | 7
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc | 30
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h | 17
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc | 10
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_node.h | 7
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc | 24
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc | 20
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/gain_node.h | 11
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc | 14
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h | 12
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc | 18
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h | 15
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc | 31
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h | 19
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc | 15
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h | 18
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc | 17
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h | 11
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc | 26
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h | 19
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc | 30
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h | 17
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc | 27
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h | 13
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc | 82
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/panner_node.h | 14
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc | 29
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h | 5
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc | 14
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h | 8
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc | 27
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h | 23
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc | 18
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h | 13
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc | 28
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h | 10
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc | 10
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h | 6
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h | 4
110 files changed, 898 insertions, 832 deletions
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc
index 39ee21aae4b..a6887cfd30d 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc
@@ -48,7 +48,7 @@ AnalyserHandler::~AnalyserHandler() {
Uninitialize();
}
-void AnalyserHandler::Process(size_t frames_to_process) {
+void AnalyserHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
if (!IsInitialized()) {
@@ -205,7 +205,7 @@ AnalyserNode* AnalyserNode::Create(BaseAudioContext& context,
}
AnalyserNode* AnalyserNode::Create(BaseAudioContext* context,
- const AnalyserOptions& options,
+ const AnalyserOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -216,13 +216,13 @@ AnalyserNode* AnalyserNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- node->setFftSize(options.fftSize(), exception_state);
- node->setSmoothingTimeConstant(options.smoothingTimeConstant(),
+ node->setFftSize(options->fftSize(), exception_state);
+ node->setSmoothingTimeConstant(options->smoothingTimeConstant(),
exception_state);
// minDecibels and maxDecibels have default values. Set both of the values
// at once.
- node->SetMinMaxDecibels(options.minDecibels(), options.maxDecibels(),
+ node->SetMinMaxDecibels(options->minDecibels(), options->maxDecibels(),
exception_state);
return node;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h
index 3b805158e30..235bfd7c0cb 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h
@@ -43,7 +43,7 @@ class AnalyserHandler final : public AudioBasicInspectorHandler {
~AnalyserHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
unsigned FftSize() const { return analyser_.FftSize(); }
void SetFftSize(unsigned size, ExceptionState&);
@@ -103,7 +103,7 @@ class AnalyserNode final : public AudioBasicInspectorNode {
public:
static AnalyserNode* Create(BaseAudioContext&, ExceptionState&);
static AnalyserNode* Create(BaseAudioContext*,
- const AnalyserOptions&,
+ const AnalyserOptions*,
ExceptionState&);
unsigned fftSize() const;
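The two analyser hunks above show this patch's recurring signature migrations: render callbacks take uint32_t frames_to_process, and generated IDL dictionaries such as AnalyserOptions are now garbage-collected and passed by pointer, so fields are read through ->. A minimal caller-side sketch, assuming AnalyserOptions follows the same generated Create()/setter pattern as the AudioContextOptions usage later in this patch:

    // Illustrative only; AnalyserOptions::Create() and the setters are
    // assumed to match Blink's generated-dictionary pattern at this revision.
    AnalyserOptions* options = AnalyserOptions::Create();
    options->setFftSize(2048);
    options->setSmoothingTimeConstant(0.5);
    AnalyserNode* analyser =
        AnalyserNode::Create(context, options, exception_state);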
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc
index aae1753fe87..08554e404cc 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc
@@ -34,8 +34,8 @@
#include "third_party/blink/renderer/platform/audio/audio_file_reader.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/scheduler/public/background_scheduler.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
-#include "third_party/blink/renderer/platform/web_task_runner.h"
namespace blink {
@@ -55,7 +55,7 @@ void AsyncAudioDecoder::DecodeAsync(
context->GetExecutionContext()->GetTaskRunner(
blink::TaskType::kInternalMedia);
- BackgroundScheduler::PostOnBackgroundThread(
+ background_scheduler::PostOnBackgroundThread(
FROM_HERE,
CrossThreadBind(&AsyncAudioDecoder::DecodeOnBackgroundThread,
WrapCrossThreadPersistent(audio_data), sample_rate,
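Beyond the include swap (web_task_runner.h out, post_cross_thread_task.h in), the functional change here is only the namespace rename from BackgroundScheduler to background_scheduler. A sketch of the cross-thread posting idiom this call relies on; the remaining bound arguments are omitted, matching the truncated hunk:

    // Sketch of the posting idiom above. WrapCrossThreadPersistent keeps the
    // garbage-collected |audio_data| alive until the bound task has run on
    // the background thread.
    background_scheduler::PostOnBackgroundThread(
        FROM_HERE,
        CrossThreadBind(&AsyncAudioDecoder::DecodeOnBackgroundThread,
                        WrapCrossThreadPersistent(audio_data), sample_rate
                        /* , further bound arguments elided */));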
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc
index bc5eb7f158c..5df6a023764 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc
@@ -44,7 +44,7 @@ AudioBasicInspectorHandler::AudioBasicInspectorHandler(
// advantage of in-place processing, where the input is simply passed through
// unprocessed to the output.
// Note: this only applies if the input and output channel counts match.
-void AudioBasicInspectorHandler::PullInputs(size_t frames_to_process) {
+void AudioBasicInspectorHandler::PullInputs(uint32_t frames_to_process) {
// Render input stream - try to render directly into output bus for
// pass-through processing where process() doesn't need to do anything...
Input(0).Pull(Output(0).Bus(), frames_to_process);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h
index eaaf754ef94..884495b635b 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h
@@ -44,7 +44,7 @@ class AudioBasicInspectorHandler : public AudioHandler {
unsigned output_channel_count);
// AudioHandler
- void PullInputs(size_t frames_to_process) final;
+ void PullInputs(uint32_t frames_to_process) final;
void CheckNumberOfChannelsForInput(AudioNodeInput*) final;
// AudioNode
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc
index a7f6b05aed0..7be1d80b6aa 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc
@@ -68,7 +68,7 @@ void AudioBasicProcessorHandler::Uninitialize() {
AudioHandler::Uninitialize();
}
-void AudioBasicProcessorHandler::Process(size_t frames_to_process) {
+void AudioBasicProcessorHandler::Process(uint32_t frames_to_process) {
AudioBus* destination_bus = Output(0).Bus();
if (!IsInitialized() || !Processor() ||
@@ -87,7 +87,7 @@ void AudioBasicProcessorHandler::Process(size_t frames_to_process) {
}
void AudioBasicProcessorHandler::ProcessOnlyAudioParams(
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
if (!IsInitialized() || !Processor())
return;
@@ -95,7 +95,7 @@ void AudioBasicProcessorHandler::ProcessOnlyAudioParams(
}
// Nice optimization in the very common case allowing for "in-place" processing
-void AudioBasicProcessorHandler::PullInputs(size_t frames_to_process) {
+void AudioBasicProcessorHandler::PullInputs(uint32_t frames_to_process) {
// Render input stream - suggest to the input to render directly into output
// bus for in-place processing in process() if possible.
Input(0).Pull(Output(0).Bus(), frames_to_process);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h
index 056e60f0ffb..e4951b6887c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h
@@ -43,9 +43,9 @@ class MODULES_EXPORT AudioBasicProcessorHandler : public AudioHandler {
~AudioBasicProcessorHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) final;
- void ProcessOnlyAudioParams(size_t frames_to_process) final;
- void PullInputs(size_t frames_to_process) final;
+ void Process(uint32_t frames_to_process) final;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) final;
+ void PullInputs(uint32_t frames_to_process) final;
void Initialize() final;
void Uninitialize() final;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc
index bb908aed720..1daf55e63a2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc
@@ -16,7 +16,7 @@ class MockAudioProcessor final : public AudioProcessor {
MockAudioProcessor() : AudioProcessor(48000, 2) {}
void Initialize() override { initialized_ = true; }
void Uninitialize() override { initialized_ = false; }
- void Process(const AudioBus*, AudioBus*, size_t) override {}
+ void Process(const AudioBus*, AudioBus*, uint32_t) override {}
void Reset() override {}
void SetNumberOfChannels(unsigned) override {}
unsigned NumberOfChannels() const override { return number_of_channels_; }
@@ -53,7 +53,7 @@ TEST(AudioBasicProcessorHandlerTest, ProcessorFinalization) {
std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
OfflineAudioContext* context = OfflineAudioContext::Create(
&page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
- MockProcessorNode* node = new MockProcessorNode(*context);
+ MockProcessorNode* node = MakeGarbageCollected<MockProcessorNode>(*context);
AudioBasicProcessorHandler& handler =
static_cast<AudioBasicProcessorHandler&>(node->Handler());
EXPECT_TRUE(handler.Processor());
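The test change above is part of the patch-wide move from plain `new` to MakeGarbageCollected<T>(...) for Oilpan-managed objects. A minimal sketch of the idiom; the Widget class is hypothetical and only the allocation pattern is the point:

    // Hypothetical Oilpan-managed class, sketched to show the allocation
    // idiom; real Blink classes also declare additional GC plumbing.
    class Widget : public GarbageCollected<Widget> {
     public:
      explicit Widget(int id) : id_(id) {}
      void Trace(blink::Visitor*) {}  // nothing to trace in this sketch
     private:
      int id_;
    };

    // Replaces `new Widget(42)`; the object lives on the Oilpan heap and is
    // reclaimed by tracing GC, never by explicit delete.
    Widget* widget = MakeGarbageCollected<Widget>(42);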
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc
index e3133cde0da..37818f29ab1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc
@@ -40,9 +40,9 @@
namespace blink {
AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate) {
- if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
+ if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate) ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
!number_of_channels || !number_of_frames)
return nullptr;
@@ -56,7 +56,7 @@ AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
}
AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
ExceptionState& exception_state) {
if (!number_of_channels ||
@@ -71,14 +71,14 @@ AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
return nullptr;
}
- if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
+ if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate)) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"sample rate", sample_rate,
- AudioUtilities::MinAudioBufferSampleRate(),
+ audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
- AudioUtilities::MaxAudioBufferSampleRate(),
+ audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return nullptr;
}
@@ -86,8 +86,8 @@ AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
if (!number_of_frames) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
- ExceptionMessages::IndexExceedsMinimumBound(
- "number of frames", number_of_frames, static_cast<size_t>(0)));
+ ExceptionMessages::IndexExceedsMinimumBound("number of frames",
+ number_of_frames, 0u));
return nullptr;
}
@@ -105,16 +105,16 @@ AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
return audio_buffer;
}
-AudioBuffer* AudioBuffer::Create(const AudioBufferOptions& options,
+AudioBuffer* AudioBuffer::Create(const AudioBufferOptions* options,
ExceptionState& exception_state) {
- return Create(options.numberOfChannels(), options.length(),
- options.sampleRate(), exception_state);
+ return Create(options->numberOfChannels(), options->length(),
+ options->sampleRate(), exception_state);
}
AudioBuffer* AudioBuffer::CreateUninitialized(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate) {
- if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
+ if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate) ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
!number_of_channels || !number_of_frames)
return nullptr;
@@ -157,7 +157,7 @@ bool AudioBuffer::CreatedSuccessfully(
}
DOMFloat32Array* AudioBuffer::CreateFloat32ArrayOrNull(
- size_t length,
+ uint32_t length,
InitializationPolicy policy) {
scoped_refptr<WTF::Float32Array> buffer;
@@ -180,7 +180,7 @@ DOMFloat32Array* AudioBuffer::CreateFloat32ArrayOrNull(
}
AudioBuffer::AudioBuffer(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
InitializationPolicy policy)
: sample_rate_(sample_rate), length_(number_of_frames) {
@@ -243,23 +243,23 @@ NotShared<DOMFloat32Array> AudioBuffer::getChannelData(unsigned channel_index) {
}
void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
- long channel_number,
+ int32_t channel_number,
ExceptionState& exception_state) {
return copyFromChannel(destination, channel_number, 0, exception_state);
}
void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
- long channel_number,
- unsigned long start_in_channel,
+ int32_t channel_number,
+ uint32_t start_in_channel,
ExceptionState& exception_state) {
if (channel_number < 0 ||
- channel_number >= static_cast<long>(channels_.size())) {
+ static_cast<uint32_t>(channel_number) >= channels_.size()) {
exception_state.ThrowDOMException(
DOMExceptionCode::kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
- "channelNumber", channel_number, 0L,
+ "channelNumber", channel_number, 0,
ExceptionMessages::kInclusiveBound,
- static_cast<long>(channels_.size() - 1),
+ static_cast<int32_t>(channels_.size() - 1),
ExceptionMessages::kInclusiveBound));
return;
@@ -271,9 +271,8 @@ void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
exception_state.ThrowDOMException(
DOMExceptionCode::kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
- "startInChannel", start_in_channel, 0UL,
- ExceptionMessages::kInclusiveBound,
- static_cast<unsigned long>(channel_data->length()),
+ "startInChannel", start_in_channel, 0U,
+ ExceptionMessages::kInclusiveBound, channel_data->length(),
ExceptionMessages::kExclusiveBound));
return;
@@ -292,23 +291,23 @@ void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
}
void AudioBuffer::copyToChannel(NotShared<DOMFloat32Array> source,
- long channel_number,
+ int32_t channel_number,
ExceptionState& exception_state) {
return copyToChannel(source, channel_number, 0, exception_state);
}
void AudioBuffer::copyToChannel(NotShared<DOMFloat32Array> source,
- long channel_number,
- unsigned long start_in_channel,
+ int32_t channel_number,
+ uint32_t start_in_channel,
ExceptionState& exception_state) {
if (channel_number < 0 ||
- channel_number >= static_cast<long>(channels_.size())) {
+ static_cast<uint32_t>(channel_number) >= channels_.size()) {
exception_state.ThrowDOMException(
DOMExceptionCode::kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
- "channelNumber", channel_number, 0L,
+ "channelNumber", channel_number, 0,
ExceptionMessages::kInclusiveBound,
- static_cast<long>(channels_.size() - 1),
+ static_cast<int32_t>(channels_.size() - 1),
ExceptionMessages::kInclusiveBound));
return;
}
@@ -319,9 +318,8 @@ void AudioBuffer::copyToChannel(NotShared<DOMFloat32Array> source,
exception_state.ThrowDOMException(
DOMExceptionCode::kIndexSizeError,
ExceptionMessages::IndexOutsideRange(
- "startInChannel", start_in_channel, 0UL,
- ExceptionMessages::kInclusiveBound,
- static_cast<unsigned long>(channel_data->length()),
+ "startInChannel", start_in_channel, 0U,
+ ExceptionMessages::kInclusiveBound, channel_data->length(),
ExceptionMessages::kExclusiveBound));
return;
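The copyFromChannel()/copyToChannel() hunks only narrow the IDL-facing types (long to int32_t, unsigned long to uint32_t); the validation itself is unchanged: channelNumber must index an existing channel, and startInChannel must lie strictly inside that channel. A standalone restatement of the check, outside Blink, for reference:

    // Standalone sketch of the index validation preserved above.
    #include <cstdint>
    #include <vector>

    bool IsValidCopyRequest(const std::vector<std::vector<float>>& channels,
                            int32_t channel_number,
                            uint32_t start_in_channel) {
      // channelNumber: inclusive range [0, channels.size() - 1].
      if (channel_number < 0 ||
          static_cast<uint32_t>(channel_number) >= channels.size())
        return false;  // Blink throws IndexSizeError here.
      // startInChannel: [0, length) -- the upper bound is exclusive.
      return start_in_channel < channels[channel_number].size();
    }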
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h
index d9bdda78b09..9867f77b9ac 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h
@@ -47,13 +47,13 @@ class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
public:
static AudioBuffer* Create(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate);
static AudioBuffer* Create(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
ExceptionState&);
- static AudioBuffer* Create(const AudioBufferOptions&, ExceptionState&);
+ static AudioBuffer* Create(const AudioBufferOptions*, ExceptionState&);
// Creates an AudioBuffer with uninitialized contents. This should
// only be used where we are guaranteed to initialize the contents
@@ -61,7 +61,7 @@ class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
// is done. |OfflineAudioContext::startRendering()| is one such
// place.
static AudioBuffer* CreateUninitialized(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate);
// Returns 0 if data is not a valid audio file.
@@ -73,7 +73,7 @@ class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
static AudioBuffer* CreateFromAudioBus(AudioBus*);
// Format
- size_t length() const { return length_; }
+ uint32_t length() const { return length_; }
double duration() const {
return length() / static_cast<double>(sampleRate());
}
@@ -85,18 +85,18 @@ class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
ExceptionState&);
NotShared<DOMFloat32Array> getChannelData(unsigned channel_index);
void copyFromChannel(NotShared<DOMFloat32Array>,
- long channel_number,
+ int32_t channel_number,
ExceptionState&);
void copyFromChannel(NotShared<DOMFloat32Array>,
- long channel_number,
- unsigned long start_in_channel,
+ int32_t channel_number,
+ uint32_t start_in_channel,
ExceptionState&);
void copyToChannel(NotShared<DOMFloat32Array>,
- long channel_number,
+ int32_t channel_number,
ExceptionState&);
void copyToChannel(NotShared<DOMFloat32Array>,
- long channel_number,
- unsigned long start_in_channel,
+ int32_t channel_number,
+ uint32_t start_in_channel,
ExceptionState&);
void Zero();
@@ -115,17 +115,17 @@ class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
explicit AudioBuffer(AudioBus*);
static DOMFloat32Array* CreateFloat32ArrayOrNull(
- size_t length,
+ uint32_t length,
InitializationPolicy allocation_policy = kZeroInitialize);
AudioBuffer(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
InitializationPolicy allocation_policy = kZeroInitialize);
bool CreatedSuccessfully(unsigned desired_number_of_channels) const;
float sample_rate_;
- size_t length_;
+ uint32_t length_;
HeapVector<Member<DOMFloat32Array>> channels_;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc
index 74ce5c17336..5f1162e3425 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc
@@ -93,7 +93,7 @@ AudioBufferSourceHandler::~AudioBufferSourceHandler() {
Uninitialize();
}
-void AudioBufferSourceHandler::Process(size_t frames_to_process) {
+void AudioBufferSourceHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
if (!IsInitialized()) {
@@ -119,8 +119,8 @@ void AudioBufferSourceHandler::Process(size_t frames_to_process) {
return;
}
- size_t quantum_frame_offset;
- size_t buffer_frames_to_process;
+ uint32_t quantum_frame_offset;
+ uint32_t buffer_frames_to_process;
double start_time_offset;
std::tie(quantum_frame_offset, buffer_frames_to_process,
@@ -154,7 +154,7 @@ void AudioBufferSourceHandler::Process(size_t frames_to_process) {
bool AudioBufferSourceHandler::RenderSilenceAndFinishIfNotLooping(
AudioBus*,
unsigned index,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
if (!Loop()) {
// If we're not looping, then stop playing when we get to the end.
@@ -176,7 +176,7 @@ bool AudioBufferSourceHandler::RenderSilenceAndFinishIfNotLooping(
bool AudioBufferSourceHandler::RenderFromBuffer(
AudioBus* bus,
unsigned destination_frame_offset,
- size_t number_of_frames) {
+ uint32_t number_of_frames) {
DCHECK(Context()->IsAudioThread());
// Basic sanity checking
@@ -198,8 +198,8 @@ bool AudioBufferSourceHandler::RenderFromBuffer(
size_t destination_length = bus->length();
bool is_length_good =
- destination_length <= AudioUtilities::kRenderQuantumFrames &&
- number_of_frames <= AudioUtilities::kRenderQuantumFrames;
+ destination_length <= audio_utilities::kRenderQuantumFrames &&
+ number_of_frames <= audio_utilities::kRenderQuantumFrames;
DCHECK(is_length_good);
if (!is_length_good)
return false;
@@ -221,13 +221,13 @@ bool AudioBufferSourceHandler::RenderFromBuffer(
// Offset the pointers to the correct offset frame.
unsigned write_index = destination_frame_offset;
- size_t buffer_length = Buffer()->length();
+ uint32_t buffer_length = Buffer()->length();
double buffer_sample_rate = Buffer()->sampleRate();
// Avoid converting from time to sample-frames twice by computing
// the grain end time first before computing the sample frame.
unsigned end_frame =
- is_grain_ ? AudioUtilities::TimeToSampleFrame(
+ is_grain_ ? audio_utilities::TimeToSampleFrame(
grain_offset_ + grain_duration_, buffer_sample_rate)
: buffer_length;
@@ -482,7 +482,7 @@ void AudioBufferSourceHandler::ClampGrainParameters(const AudioBuffer* buffer) {
// identical to the PCM data stored in the buffer. Since playbackRate == 1 is
// very common, it's worth considering quality.
virtual_read_index_ =
- AudioUtilities::TimeToSampleFrame(grain_offset_, buffer->sampleRate());
+ audio_utilities::TimeToSampleFrame(grain_offset_, buffer->sampleRate());
}
void AudioBufferSourceHandler::Start(double when,
@@ -687,7 +687,7 @@ AudioBufferSourceNode* AudioBufferSourceNode::Create(
AudioBufferSourceNode* AudioBufferSourceNode::Create(
BaseAudioContext* context,
- AudioBufferSourceOptions& options,
+ AudioBufferSourceOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -696,13 +696,13 @@ AudioBufferSourceNode* AudioBufferSourceNode::Create(
if (!node)
return nullptr;
- if (options.hasBuffer())
- node->setBuffer(options.buffer(), exception_state);
- node->detune()->setValue(options.detune());
- node->setLoop(options.loop());
- node->setLoopEnd(options.loopEnd());
- node->setLoopStart(options.loopStart());
- node->playbackRate()->setValue(options.playbackRate());
+ if (options->hasBuffer())
+ node->setBuffer(options->buffer(), exception_state);
+ node->detune()->setValue(options->detune());
+ node->setLoop(options->loop());
+ node->setLoopEnd(options->loopEnd());
+ node->setLoopStart(options->loopStart());
+ node->playbackRate()->setValue(options->playbackRate());
return node;
}
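Only the audio_utilities namespace spelling changes in these hunks; the grain math still converts a time offset to a sample frame before indexing the buffer. A rough standalone sketch of that conversion, assuming TimeToSampleFrame() is a plain seconds-to-frames rounding helper:

    // Assumed behaviour of audio_utilities::TimeToSampleFrame(); written out
    // here only to make the grain-end computation above easier to follow.
    #include <cmath>
    #include <cstddef>

    size_t TimeToSampleFrame(double time_seconds, double sample_rate) {
      return static_cast<size_t>(std::round(time_seconds * sample_rate));
    }

    // e.g. the grain end in the hunk above:
    //   end_frame = TimeToSampleFrame(grain_offset_ + grain_duration_,
    //                                 buffer_sample_rate);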
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h
index 0547dc956c1..27e28265e96 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h
@@ -55,7 +55,7 @@ class AudioBufferSourceHandler final : public AudioScheduledSourceHandler {
~AudioBufferSourceHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
// setBuffer() is called on the main thread. This is the buffer we use for
// playback.
@@ -110,12 +110,12 @@ class AudioBufferSourceHandler final : public AudioScheduledSourceHandler {
// Returns true on success.
bool RenderFromBuffer(AudioBus*,
unsigned destination_frame_offset,
- size_t number_of_frames);
+ uint32_t number_of_frames);
// Render silence starting from "index" frame in AudioBus.
inline bool RenderSilenceAndFinishIfNotLooping(AudioBus*,
unsigned index,
- size_t frames_to_process);
+ uint32_t frames_to_process);
// Clamps grain parameters to the duration of the given AudioBuffer.
void ClampGrainParameters(const AudioBuffer*);
@@ -191,7 +191,7 @@ class AudioBufferSourceNode final : public AudioScheduledSourceNode {
public:
static AudioBufferSourceNode* Create(BaseAudioContext&, ExceptionState&);
static AudioBufferSourceNode* Create(BaseAudioContext*,
- AudioBufferSourceOptions&,
+ AudioBufferSourceOptions*,
ExceptionState&);
void Trace(blink::Visitor*) override;
AudioBufferSourceHandler& GetAudioBufferSourceHandler() const;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc
index 9669d35a3f2..51c52a2d08c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc
@@ -46,36 +46,37 @@ static unsigned g_hardware_context_count = 0;
static unsigned g_context_id = 0;
AudioContext* AudioContext::Create(Document& document,
- const AudioContextOptions& context_options,
+ const AudioContextOptions* context_options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
+ LOG(ERROR) << __func__;
UseCounter::CountCrossOriginIframe(
document, WebFeature::kAudioContextCrossOriginIframe);
WebAudioLatencyHint latency_hint(WebAudioLatencyHint::kCategoryInteractive);
- if (context_options.latencyHint().IsAudioContextLatencyCategory()) {
+ if (context_options->latencyHint().IsAudioContextLatencyCategory()) {
latency_hint = WebAudioLatencyHint(
- context_options.latencyHint().GetAsAudioContextLatencyCategory());
- } else if (context_options.latencyHint().IsDouble()) {
+ context_options->latencyHint().GetAsAudioContextLatencyCategory());
+ } else if (context_options->latencyHint().IsDouble()) {
// This should be the requested output latency in seconds, without taking
// into account double buffering (same as baseLatency).
latency_hint =
- WebAudioLatencyHint(context_options.latencyHint().GetAsDouble());
+ WebAudioLatencyHint(context_options->latencyHint().GetAsDouble());
}
AudioContext* audio_context = new AudioContext(document, latency_hint);
audio_context->PauseIfNeeded();
- if (!AudioUtilities::IsValidAudioBufferSampleRate(
+ if (!audio_utilities::IsValidAudioBufferSampleRate(
audio_context->sampleRate())) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"hardware sample rate", audio_context->sampleRate(),
- AudioUtilities::MinAudioBufferSampleRate(),
+ audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
- AudioUtilities::MaxAudioBufferSampleRate(),
+ audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return audio_context;
}
@@ -89,6 +90,7 @@ AudioContext* AudioContext::Create(Document& document,
audio_context->MaybeAllowAutoplayWithUnlockType(
AutoplayUnlockType::kContextConstructor);
if (audio_context->IsAllowedToStart()) {
+ LOG(ERROR) << "starting";
audio_context->StartRendering();
audio_context->SetContextState(kRunning);
}
@@ -125,12 +127,15 @@ AudioContext::AudioContext(Document& document,
: BaseAudioContext(&document, kRealtimeContext),
context_id_(g_context_id++) {
destination_node_ = DefaultAudioDestinationNode::Create(this, latency_hint);
+ LOG(ERROR) << __func__;
switch (GetAutoplayPolicy()) {
case AutoplayPolicy::Type::kNoUserGestureRequired:
+ LOG(ERROR) << "no user gesture";
break;
case AutoplayPolicy::Type::kUserGestureRequired:
case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+ LOG(ERROR) << "user gesture";
if (document.GetFrame() &&
document.GetFrame()->IsCrossOriginSubframe()) {
autoplay_status_ = AutoplayStatus::kAutoplayStatusFailed;
@@ -138,6 +143,7 @@ AudioContext::AudioContext(Document& document,
}
break;
case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+ LOG(ERROR) << "document user activation";
autoplay_status_ = AutoplayStatus::kAutoplayStatusFailed;
user_gesture_required_ = true;
break;
@@ -197,6 +203,7 @@ ScriptPromise AudioContext::suspendContext(ScriptState* script_state) {
ScriptPromise AudioContext::resumeContext(ScriptState* script_state) {
DCHECK(IsMainThread());
+ LOG(ERROR) << __func__;
if (IsContextClosed()) {
return ScriptPromise::RejectWithDOMException(
@@ -237,17 +244,19 @@ ScriptPromise AudioContext::resumeContext(ScriptState* script_state) {
return promise;
}
-void AudioContext::getOutputTimestamp(ScriptState* script_state,
- AudioTimestamp& result) {
+AudioTimestamp* AudioContext::getOutputTimestamp(
+ ScriptState* script_state) const {
+ AudioTimestamp* result = AudioTimestamp::Create();
+
DCHECK(IsMainThread());
LocalDOMWindow* window = LocalDOMWindow::From(script_state);
if (!window)
- return;
+ return result;
if (!destination()) {
- result.setContextTime(0.0);
- result.setPerformanceTime(0.0);
- return;
+ result->setContextTime(0.0);
+ result->setPerformanceTime(0.0);
+ return result;
}
WindowPerformance* performance = DOMWindowPerformance::performance(*window);
@@ -266,8 +275,9 @@ void AudioContext::getOutputTimestamp(ScriptState* script_state,
if (performance_time < 0.0)
performance_time = 0.0;
- result.setContextTime(position.position);
- result.setPerformanceTime(performance_time);
+ result->setContextTime(position.position);
+ result->setPerformanceTime(performance_time);
+ return result;
}
ScriptPromise AudioContext::closeContext(ScriptState* script_state) {
@@ -423,6 +433,7 @@ void AudioContext::MaybeAllowAutoplayWithUnlockType(AutoplayUnlockType type) {
}
bool AudioContext::IsAllowedToStart() const {
+ LOG(ERROR) << __func__;
if (!user_gesture_required_)
return true;
@@ -504,33 +515,32 @@ void AudioContext::ContextDestroyed(ExecutionContext*) {
void AudioContext::NotifyAudibleAudioStarted() {
DCHECK(IsMainThread());
- if (!audio_context_manager_) {
- Document* document = GetDocument();
-
- // If there's no document don't bother to try to create the mojom interface.
- // This can happen if the document has been reloaded while the audio thread
- // is still running.
- if (!document) {
- return;
- }
-
- document->GetFrame()->GetInterfaceProvider().GetInterface(
- mojo::MakeRequest(&audio_context_manager_));
- }
-
- DCHECK(audio_context_manager_);
- audio_context_manager_->AudioContextAudiblePlaybackStarted(context_id_);
+ EnsureAudioContextManagerService();
+ if (audio_context_manager_)
+ audio_context_manager_->AudioContextAudiblePlaybackStarted(context_id_);
}
void AudioContext::NotifyAudibleAudioStopped() {
DCHECK(IsMainThread());
- DCHECK(audio_context_manager_);
- // If we don't have a document, we don't need to notify anyone that we've
- // stopped.
- if (GetDocument()) {
+ EnsureAudioContextManagerService();
+ if (audio_context_manager_)
audio_context_manager_->AudioContextAudiblePlaybackStopped(context_id_);
- }
+}
+
+void AudioContext::EnsureAudioContextManagerService() {
+ if (audio_context_manager_ || !GetDocument())
+ return;
+
+ GetDocument()->GetFrame()->GetInterfaceProvider().GetInterface(
+ mojo::MakeRequest(&audio_context_manager_));
+ audio_context_manager_.set_connection_error_handler(
+ WTF::Bind(&AudioContext::OnAudioContextManagerServiceConnectionError,
+ WrapWeakPersistent(this)));
+}
+
+void AudioContext::OnAudioContextManagerServiceConnectionError() {
+ audio_context_manager_ = nullptr;
}
} // namespace blink
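The rewritten notification path above lazily binds the AudioContextManager interface and, new in this patch, installs a connection error handler so a dropped pipe simply clears the pointer instead of tripping the old DCHECK. A behavioural sketch of what each notify call reduces to once the helper is in place (mirrors the hunk; the comments are the addition):

    void AudioContext::NotifyAudibleAudioStarted() {
      DCHECK(IsMainThread());
      EnsureAudioContextManagerService();   // binds once, installs the
                                            // connection error handler
      if (audio_context_manager_)           // unbound: document gone or pipe
                                            // already errored, so no-op
        audio_context_manager_->AudioContextAudiblePlaybackStarted(context_id_);
    }

    // On a connection error the handler nulls audio_context_manager_, so the
    // next notification re-runs EnsureAudioContextManagerService() and
    // attempts to bind again.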
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h
index 7a3db4c0c1b..c9bd2a53bbd 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h
@@ -34,7 +34,7 @@ class MODULES_EXPORT AudioContext : public BaseAudioContext {
public:
static AudioContext* Create(Document&,
- const AudioContextOptions&,
+ const AudioContextOptions*,
ExceptionState&);
~AudioContext() override;
@@ -51,7 +51,7 @@ class MODULES_EXPORT AudioContext : public BaseAudioContext {
bool HasRealtimeConstraint() final { return true; }
- void getOutputTimestamp(ScriptState*, AudioTimestamp&);
+ AudioTimestamp* getOutputTimestamp(ScriptState*) const;
double baseLatency() const;
MediaElementAudioSourceNode* createMediaElementSource(HTMLMediaElement*,
@@ -72,6 +72,7 @@ class MODULES_EXPORT AudioContext : public BaseAudioContext {
private:
friend class AudioContextAutoplayTest;
+ friend class AudioContextTest;
// Do not change the order of this enum, it is used for metrics.
enum AutoplayStatus {
@@ -123,6 +124,9 @@ class MODULES_EXPORT AudioContext : public BaseAudioContext {
void NotifyAudibleAudioStarted() final;
void NotifyAudibleAudioStopped() final;
+ void EnsureAudioContextManagerService();
+ void OnAudioContextManagerServiceConnectionError();
+
unsigned context_id_;
Member<ScriptPromiseResolver> close_resolver_;
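The header now reflects both the dictionary-by-pointer migration (AudioContextOptions*) and getOutputTimestamp() returning a freshly created AudioTimestamp instead of filling an out-parameter. A caller-side sketch, assuming the generated dictionary exposes getters matching the setContextTime()/setPerformanceTime() setters used in the .cc hunk:

    // Illustrative caller; the getter names are assumptions based on the
    // setters visible in the audio_context.cc hunk.
    AudioTimestamp* ts = audio_context->getOutputTimestamp(script_state);
    // ts->contextTime()     -- position on the context's own time base
    // ts->performanceTime() -- the same instant on the Performance timeline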
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_autoplay_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_autoplay_test.cc
index b9b40ca63c7..e58c2f40042 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_autoplay_test.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_autoplay_test.cc
@@ -64,6 +64,8 @@ class MockWebAudioDeviceForAutoplayTest : public WebAudioDevice {
void Start() override {}
void Stop() override {}
+ void Pause() override {}
+ void Resume() override {}
double SampleRate() override { return sample_rate_; }
int FramesPerBuffer() override { return frames_per_buffer_; }
@@ -157,7 +159,7 @@ class AudioContextAutoplayTest
// Creates an AudioContext without a gesture inside a x-origin child frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CreateNoGesture_Child) {
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -181,7 +183,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CreateNoGesture_Child) {
// Creates an AudioContext without a gesture inside a main frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CreateNoGesture_Main) {
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -207,7 +209,7 @@ TEST_P(AudioContextAutoplayTest,
ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
RejectPendingResolvers(audio_context);
RecordAutoplayStatus(audio_context);
@@ -235,7 +237,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CallResumeNoGesture_Main) {
ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
RejectPendingResolvers(audio_context);
RecordAutoplayStatus(audio_context);
@@ -263,7 +265,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CreateGesture_Child) {
UserGestureToken::kNewGesture);
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -292,7 +294,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CreateGesture_Main) {
UserGestureToken::kNewGesture);
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -317,7 +319,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CallResumeGesture_Child) {
ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(ChildDocument().GetFrame(),
@@ -352,7 +354,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CallResumeGesture_Main) {
ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(GetDocument().GetFrame(),
@@ -382,7 +384,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_CallResumeGesture_Main) {
// x-origin child frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartNoGesture_Child) {
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->NotifySourceNodeStart();
RecordAutoplayStatus(audio_context);
@@ -408,7 +410,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartNoGesture_Child) {
// main frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartNoGesture_Main) {
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->NotifySourceNodeStart();
RecordAutoplayStatus(audio_context);
@@ -432,7 +434,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartNoGesture_Main) {
// x-origin child frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartGesture_Child) {
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(ChildDocument().GetFrame(),
@@ -463,7 +465,7 @@ TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartGesture_Child) {
// main frame.
TEST_P(AudioContextAutoplayTest, AutoplayMetrics_NodeStartGesture_Main) {
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(GetDocument().GetFrame(),
@@ -494,7 +496,7 @@ TEST_P(AudioContextAutoplayTest,
ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->NotifySourceNodeStart();
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
@@ -530,7 +532,7 @@ TEST_P(AudioContextAutoplayTest,
ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
audio_context->NotifySourceNodeStart();
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
@@ -563,7 +565,7 @@ TEST_P(AudioContextAutoplayTest,
ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(ChildDocument().GetFrame(),
@@ -599,7 +601,7 @@ TEST_P(AudioContextAutoplayTest,
ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
std::unique_ptr<UserGestureIndicator> user_gesture_scope =
LocalFrame::NotifyUserActivation(GetDocument().GetFrame(),
@@ -633,7 +635,7 @@ TEST_P(AudioContextAutoplayTest,
UserGestureToken::kNewGesture);
AudioContext* audio_context = AudioContext::Create(
- ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ ChildDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -670,7 +672,7 @@ TEST_P(AudioContextAutoplayTest,
UserGestureToken::kNewGesture);
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
@@ -697,7 +699,7 @@ TEST_P(AudioContextAutoplayTest,
true);
AudioContext* audio_context = AudioContext::Create(
- GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+ GetDocument(), AudioContextOptions::Create(), ASSERT_NO_EXCEPTION);
RecordAutoplayStatus(audio_context);
switch (GetParam()) {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc
index 2923f69a21f..9a7cbde4e5c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc
@@ -27,6 +27,8 @@ class MockWebAudioDeviceForAudioContext : public WebAudioDevice {
void Start() override {}
void Stop() override {}
+ void Pause() override {}
+ void Resume() override {}
double SampleRate() override { return sample_rate_; }
int FramesPerBuffer() override { return frames_per_buffer_; }
@@ -87,21 +89,31 @@ class AudioContextTest : public PageTestBase {
void SetUp() override { PageTestBase::SetUp(IntSize()); }
+ mojom::blink::AudioContextManagerPtr& GetAudioContextManagerPtrFor(
+ AudioContext* audio_context) {
+ return audio_context->audio_context_manager_;
+ }
+
+ void SetContextState(AudioContext* audio_context,
+ AudioContext::AudioContextState state) {
+ audio_context->SetContextState(state);
+ }
+
private:
std::unique_ptr<ScopedTestingPlatformSupport<AudioContextTestPlatform>>
platform_;
};
TEST_F(AudioContextTest, AudioContextOptions_WebAudioLatencyHint) {
- AudioContextOptions interactive_options;
- interactive_options.setLatencyHint(
+ AudioContextOptions* interactive_options = AudioContextOptions::Create();
+ interactive_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
"interactive"));
AudioContext* interactive_context = AudioContext::Create(
GetDocument(), interactive_options, ASSERT_NO_EXCEPTION);
- AudioContextOptions balanced_options;
- balanced_options.setLatencyHint(
+ AudioContextOptions* balanced_options = AudioContextOptions::Create();
+ balanced_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
"balanced"));
AudioContext* balanced_context = AudioContext::Create(
@@ -109,16 +121,16 @@ TEST_F(AudioContextTest, AudioContextOptions_WebAudioLatencyHint) {
EXPECT_GT(balanced_context->baseLatency(),
interactive_context->baseLatency());
- AudioContextOptions playback_options;
- playback_options.setLatencyHint(
+ AudioContextOptions* playback_options = AudioContextOptions::Create();
+ playback_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
"playback"));
AudioContext* playback_context = AudioContext::Create(
GetDocument(), playback_options, ASSERT_NO_EXCEPTION);
EXPECT_GT(playback_context->baseLatency(), balanced_context->baseLatency());
- AudioContextOptions exact_too_small_options;
- exact_too_small_options.setLatencyHint(
+ AudioContextOptions* exact_too_small_options = AudioContextOptions::Create();
+ exact_too_small_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromDouble(
interactive_context->baseLatency() / 2));
AudioContext* exact_too_small_context = AudioContext::Create(
@@ -129,15 +141,15 @@ TEST_F(AudioContextTest, AudioContextOptions_WebAudioLatencyHint) {
const double exact_latency_sec =
(interactive_context->baseLatency() + playback_context->baseLatency()) /
2;
- AudioContextOptions exact_ok_options;
- exact_ok_options.setLatencyHint(
+ AudioContextOptions* exact_ok_options = AudioContextOptions::Create();
+ exact_ok_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromDouble(exact_latency_sec));
AudioContext* exact_ok_context = AudioContext::Create(
GetDocument(), exact_ok_options, ASSERT_NO_EXCEPTION);
EXPECT_EQ(exact_ok_context->baseLatency(), exact_latency_sec);
- AudioContextOptions exact_too_big_options;
- exact_too_big_options.setLatencyHint(
+ AudioContextOptions* exact_too_big_options = AudioContextOptions::Create();
+ exact_too_big_options->setLatencyHint(
AudioContextLatencyCategoryOrDouble::FromDouble(
playback_context->baseLatency() * 2));
AudioContext* exact_too_big_context = AudioContext::Create(
@@ -146,4 +158,17 @@ TEST_F(AudioContextTest, AudioContextOptions_WebAudioLatencyHint) {
playback_context->baseLatency());
}
+TEST_F(AudioContextTest, AudioContextAudibility_ServiceUnbind) {
+ AudioContextOptions* options = AudioContextOptions::Create();
+ AudioContext* audio_context =
+ AudioContext::Create(GetDocument(), options, ASSERT_NO_EXCEPTION);
+
+ audio_context->set_was_audible_for_testing(true);
+ GetAudioContextManagerPtrFor(audio_context).reset();
+ SetContextState(audio_context, AudioContext::AudioContextState::kSuspended);
+
+ ScopedTestingPlatformSupport<TestingPlatformSupport> platform;
+ platform->RunUntilIdle();
+}
+
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc
index ac37756aab8..b4050e53f5e 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc
@@ -30,7 +30,7 @@
namespace blink {
AudioDestinationHandler::AudioDestinationHandler(AudioNode& node)
- : AudioHandler(kNodeTypeDestination, node, 0), current_sample_frame_(0) {
+ : AudioHandler(kNodeTypeDestination, node, 0) {
AddInput();
}
@@ -48,7 +48,7 @@ AudioDestinationHandler& AudioDestinationNode::GetAudioDestinationHandler()
return static_cast<AudioDestinationHandler&>(Handler());
}
-unsigned long AudioDestinationNode::maxChannelCount() const {
+uint32_t AudioDestinationNode::maxChannelCount() const {
return GetAudioDestinationHandler().MaxChannelCount();
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h
index c143f860bd3..818b3ac50b9 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h
@@ -26,8 +26,8 @@
#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_DESTINATION_NODE_H_
#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_DESTINATION_NODE_H_
+#include <atomic>
#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
-#include "third_party/blink/renderer/platform/wtf/atomics.h"
namespace blink {
@@ -43,7 +43,7 @@ class AudioDestinationHandler : public AudioHandler {
// The method MUST NOT be invoked when rendering a graph because the
// destination node is a sink. Instead, this node gets pulled by the
// underlying renderer (audio hardware or worker thread).
- void Process(size_t) final { NOTREACHED(); }
+ void Process(uint32_t) final { NOTREACHED(); }
virtual void StartRendering() = 0;
virtual void StopRendering() = 0;
@@ -54,7 +54,7 @@ class AudioDestinationHandler : public AudioHandler {
virtual void RestartRendering() = 0;
size_t CurrentSampleFrame() const {
- return AcquireLoad(&current_sample_frame_);
+ return current_sample_frame_.load(std::memory_order_acquire);
}
double CurrentTime() const {
@@ -62,7 +62,7 @@ class AudioDestinationHandler : public AudioHandler {
}
virtual double SampleRate() const = 0;
- virtual unsigned long MaxChannelCount() const = 0;
+ virtual uint32_t MaxChannelCount() const = 0;
void ContextDestroyed() { is_execution_context_destroyed_ = true; }
bool IsExecutionContextDestroyed() const {
@@ -70,10 +70,15 @@ class AudioDestinationHandler : public AudioHandler {
}
protected:
- // The number of sample frames processed by the destination so far.
- size_t current_sample_frame_;
+ void AdvanceCurrentSampleFrame(size_t number_of_frames) {
+ current_sample_frame_.fetch_add(number_of_frames,
+ std::memory_order_release);
+ }
private:
+ // The number of sample frames processed by the destination so far.
+ std::atomic_size_t current_sample_frame_{0};
+
// True if the execution context is being destroyed. If this is true, the
// destination node must avoid checking for or accessing the execution
// context.
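
The atomic migration above swaps WTF's AcquireLoad/ReleaseStore helpers for std::atomic with an explicit acquire/release pairing: the audio thread publishes progress with fetch_add(std::memory_order_release) and readers observe it with load(std::memory_order_acquire). A minimal, standalone sketch of that pattern in plain C++ follows; AudioClock, Advance, and Frames are illustrative names, not Blink code.

    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>

    // Illustrative stand-in for the destination handler's frame counter.
    class AudioClock {
     public:
      // Writer side (audio thread): publish the frames rendered so far.
      void Advance(size_t frames) {
        current_frame_.fetch_add(frames, std::memory_order_release);
      }
      // Reader side (main thread): observe a consistent frame count.
      size_t Frames() const {
        return current_frame_.load(std::memory_order_acquire);
      }

     private:
      std::atomic_size_t current_frame_{0};
    };

    int main() {
      AudioClock clock;
      std::thread audio([&clock] {
        for (int quantum = 0; quantum < 1000; ++quantum)
          clock.Advance(128);  // one render quantum per iteration
      });
      audio.join();
      std::printf("%zu frames rendered\n", clock.Frames());  // 128000
    }
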
@@ -92,7 +97,7 @@ class AudioDestinationNode : public AudioNode {
DEFINE_WRAPPERTYPEINFO();
public:
- unsigned long maxChannelCount() const;
+ uint32_t maxChannelCount() const;
// Returns its own handler object instead of a generic one from
// AudioNode::Handler().
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc
index 33574aae5cd..acc8566847e 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc
@@ -91,15 +91,15 @@ AudioListener::AudioListener(BaseAudioContext& context)
AudioParamHandler::AutomationRateMode::kVariable)),
last_update_time_(-1),
is_listener_dirty_(false),
- position_x_values_(AudioUtilities::kRenderQuantumFrames),
- position_y_values_(AudioUtilities::kRenderQuantumFrames),
- position_z_values_(AudioUtilities::kRenderQuantumFrames),
- forward_x_values_(AudioUtilities::kRenderQuantumFrames),
- forward_y_values_(AudioUtilities::kRenderQuantumFrames),
- forward_z_values_(AudioUtilities::kRenderQuantumFrames),
- up_x_values_(AudioUtilities::kRenderQuantumFrames),
- up_y_values_(AudioUtilities::kRenderQuantumFrames),
- up_z_values_(AudioUtilities::kRenderQuantumFrames) {
+ position_x_values_(audio_utilities::kRenderQuantumFrames),
+ position_y_values_(audio_utilities::kRenderQuantumFrames),
+ position_z_values_(audio_utilities::kRenderQuantumFrames),
+ forward_x_values_(audio_utilities::kRenderQuantumFrames),
+ forward_y_values_(audio_utilities::kRenderQuantumFrames),
+ forward_z_values_(audio_utilities::kRenderQuantumFrames),
+ up_x_values_(audio_utilities::kRenderQuantumFrames),
+ up_y_values_(audio_utilities::kRenderQuantumFrames),
+ up_z_values_(audio_utilities::kRenderQuantumFrames) {
// Initialize the cached values with the current values. Thus, we don't need
// to notify any panners because we haven't moved.
last_position_ = GetPosition();
@@ -148,7 +148,7 @@ bool AudioListener::HasSampleAccurateValues() const {
upZ()->Handler().HasSampleAccurateValues();
}
-void AudioListener::UpdateValuesIfNeeded(size_t frames_to_process) {
+void AudioListener::UpdateValuesIfNeeded(uint32_t frames_to_process) {
double current_time =
positionX()->Handler().DestinationHandler().CurrentTime();
if (last_update_time_ != current_time) {
@@ -192,47 +192,47 @@ void AudioListener::UpdateValuesIfNeeded(size_t frames_to_process) {
}
}
-const float* AudioListener::GetPositionXValues(size_t frames_to_process) {
+const float* AudioListener::GetPositionXValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return position_x_values_.Data();
}
-const float* AudioListener::GetPositionYValues(size_t frames_to_process) {
+const float* AudioListener::GetPositionYValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return position_y_values_.Data();
}
-const float* AudioListener::GetPositionZValues(size_t frames_to_process) {
+const float* AudioListener::GetPositionZValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return position_z_values_.Data();
}
-const float* AudioListener::GetForwardXValues(size_t frames_to_process) {
+const float* AudioListener::GetForwardXValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return forward_x_values_.Data();
}
-const float* AudioListener::GetForwardYValues(size_t frames_to_process) {
+const float* AudioListener::GetForwardYValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return forward_y_values_.Data();
}
-const float* AudioListener::GetForwardZValues(size_t frames_to_process) {
+const float* AudioListener::GetForwardZValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return forward_z_values_.Data();
}
-const float* AudioListener::GetUpXValues(size_t frames_to_process) {
+const float* AudioListener::GetUpXValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return up_x_values_.Data();
}
-const float* AudioListener::GetUpYValues(size_t frames_to_process) {
+const float* AudioListener::GetUpYValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return up_y_values_.Data();
}
-const float* AudioListener::GetUpZValues(size_t frames_to_process) {
+const float* AudioListener::GetUpZValues(uint32_t frames_to_process) {
UpdateValuesIfNeeded(frames_to_process);
return up_z_values_.Data();
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h
index 71fcae06954..608bd4de29a 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h
@@ -87,17 +87,17 @@ class AudioListener : public ScriptWrappable {
return FloatPoint3D(up_x_->value(), up_y_->value(), up_z_->value());
}
- const float* GetPositionXValues(size_t frames_to_process);
- const float* GetPositionYValues(size_t frames_to_process);
- const float* GetPositionZValues(size_t frames_to_process);
+ const float* GetPositionXValues(uint32_t frames_to_process);
+ const float* GetPositionYValues(uint32_t frames_to_process);
+ const float* GetPositionZValues(uint32_t frames_to_process);
- const float* GetForwardXValues(size_t frames_to_process);
- const float* GetForwardYValues(size_t frames_to_process);
- const float* GetForwardZValues(size_t frames_to_process);
+ const float* GetForwardXValues(uint32_t frames_to_process);
+ const float* GetForwardYValues(uint32_t frames_to_process);
+ const float* GetForwardZValues(uint32_t frames_to_process);
- const float* GetUpXValues(size_t frames_to_process);
- const float* GetUpYValues(size_t frames_to_process);
- const float* GetUpZValues(size_t frames_to_process);
+ const float* GetUpXValues(uint32_t frames_to_process);
+ const float* GetUpYValues(uint32_t frames_to_process);
+ const float* GetUpZValues(uint32_t frames_to_process);
// Position
void setPosition(float x, float y, float z, ExceptionState& exceptionState) {
@@ -167,7 +167,7 @@ class AudioListener : public ScriptWrappable {
// the audio thread.
bool is_listener_dirty_;
- void UpdateValuesIfNeeded(size_t frames_to_process);
+ void UpdateValuesIfNeeded(uint32_t frames_to_process);
AudioFloatArray position_x_values_;
AudioFloatArray position_y_values_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc
index f3f1d0d78c2..81343ac7a23 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc
@@ -32,7 +32,6 @@
#include "third_party/blink/renderer/platform/bindings/exception_messages.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/instance_counters.h"
-#include "third_party/blink/renderer/platform/wtf/atomics.h"
#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
@@ -76,8 +75,6 @@ AudioHandler::AudioHandler(NodeType node_type,
AudioHandler::~AudioHandler() {
DCHECK(IsMainThread());
- // dispose() should be called.
- DCHECK(!GetNode());
InstanceCounters::DecrementCounter(InstanceCounters::kAudioHandlerCounter);
#if DEBUG_AUDIONODE_REFERENCES
--node_count_[GetNodeType()];
@@ -111,7 +108,6 @@ void AudioHandler::Dispose() {
Context()->GetDeferredTaskHandler().RemoveAutomaticPullNode(this);
for (auto& output : outputs_)
output->Dispose();
- node_ = nullptr;
}
AudioNode* AudioHandler::GetNode() const {
@@ -203,7 +199,7 @@ AudioNodeOutput& AudioHandler::Output(unsigned i) {
return *outputs_[i];
}
-unsigned long AudioHandler::ChannelCount() {
+unsigned AudioHandler::ChannelCount() {
return channel_count_;
}
@@ -218,7 +214,7 @@ void AudioHandler::SetInternalChannelInterpretation(
new_channel_interpretation_ = interpretation;
}
-void AudioHandler::SetChannelCount(unsigned long channel_count,
+void AudioHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -316,7 +312,7 @@ void AudioHandler::UpdateChannelsForInputs() {
input->ChangedOutputs();
}
-void AudioHandler::ProcessIfNecessary(size_t frames_to_process) {
+void AudioHandler::ProcessIfNecessary(uint32_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
if (!IsInitialized())
@@ -378,7 +374,7 @@ bool AudioHandler::PropagatesSilence() const {
Context()->currentTime();
}
-void AudioHandler::PullInputs(size_t frames_to_process) {
+void AudioHandler::PullInputs(uint32_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
// Process all of the AudioNodes connected to our inputs.
@@ -453,10 +449,9 @@ void AudioHandler::DisableOutputsIfNecessary() {
// the outputs so that the tail for the node can be output.
// Otherwise, we can disable the outputs right away.
if (RequiresTailProcessing()) {
- if (Context()->ContextState() !=
- BaseAudioContext::AudioContextState::kClosed) {
- Context()->GetDeferredTaskHandler().AddTailProcessingHandler(this);
- }
+ auto& deferred_task_handler = Context()->GetDeferredTaskHandler();
+ if (deferred_task_handler.AcceptsTailProcessing())
+ deferred_task_handler.AddTailProcessingHandler(this);
} else {
DisableOutputs();
}
@@ -470,7 +465,8 @@ void AudioHandler::DisableOutputs() {
}
void AudioHandler::MakeConnection() {
- AtomicIncrement(&connection_ref_count_);
+ Context()->AssertGraphOwner();
+ connection_ref_count_++;
#if DEBUG_AUDIONODE_REFERENCES
fprintf(
@@ -512,8 +508,7 @@ void AudioHandler::BreakConnection() {
void AudioHandler::BreakConnectionWithLock() {
Context()->AssertGraphOwner();
-
- AtomicDecrement(&connection_ref_count_);
+ connection_ref_count_--;
#if DEBUG_AUDIONODE_REFERENCES
fprintf(stderr,
@@ -618,11 +613,11 @@ void AudioNode::Dispose() {
if (context()->HasRealtimeConstraint()) {
// Add the handler to the orphan list if the context is not
- // closed. (Nothing will clean up the orphan list if the context
- // is closed.) These will get cleaned up in the post render task
+    // uninitialized. (Nothing will clean up the orphan list if the context
+    // is uninitialized.) These will get cleaned up in the post render task
// if the audio thread is running or when the context is collected (in
// the worst case).
- if (context()->ContextState() != BaseAudioContext::kClosed) {
+ if (!context()->IsContextClosed()) {
context()->GetDeferredTaskHandler().AddRenderingOrphanHandler(
std::move(handler_));
}
@@ -659,16 +654,16 @@ void AudioNode::Trace(blink::Visitor* visitor) {
EventTargetWithInlineData::Trace(visitor);
}
-void AudioNode::HandleChannelOptions(const AudioNodeOptions& options,
+void AudioNode::HandleChannelOptions(const AudioNodeOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
- if (options.hasChannelCount())
- setChannelCount(options.channelCount(), exception_state);
- if (options.hasChannelCountMode())
- setChannelCountMode(options.channelCountMode(), exception_state);
- if (options.hasChannelInterpretation())
- setChannelInterpretation(options.channelInterpretation(), exception_state);
+ if (options->hasChannelCount())
+ setChannelCount(options->channelCount(), exception_state);
+ if (options->hasChannelCountMode())
+ setChannelCountMode(options->channelCountMode(), exception_state);
+ if (options->hasChannelInterpretation())
+ setChannelInterpretation(options->channelInterpretation(), exception_state);
}
BaseAudioContext* AudioNode::context() const {
@@ -737,8 +732,10 @@ AudioNode* AudioNode::connect(AudioNode* destination,
destination->Handler()
.Input(input_index)
.Connect(Handler().Output(output_index));
- if (!connected_nodes_[output_index])
- connected_nodes_[output_index] = new HeapHashSet<Member<AudioNode>>();
+ if (!connected_nodes_[output_index]) {
+ connected_nodes_[output_index] =
+ MakeGarbageCollected<HeapHashSet<Member<AudioNode>>>();
+ }
connected_nodes_[output_index]->insert(destination);
Handler().UpdatePullStatusIfNeeded();
@@ -783,8 +780,10 @@ void AudioNode::connect(AudioParam* param,
}
param->Handler().Connect(Handler().Output(output_index));
- if (!connected_params_[output_index])
- connected_params_[output_index] = new HeapHashSet<Member<AudioParam>>();
+ if (!connected_params_[output_index]) {
+ connected_params_[output_index] =
+ MakeGarbageCollected<HeapHashSet<Member<AudioParam>>>();
+ }
connected_params_[output_index]->insert(param);
Handler().UpdatePullStatusIfNeeded();
@@ -1024,11 +1023,11 @@ unsigned AudioNode::numberOfOutputs() const {
return Handler().NumberOfOutputs();
}
-unsigned long AudioNode::channelCount() const {
+unsigned AudioNode::channelCount() const {
return Handler().ChannelCount();
}
-void AudioNode::setChannelCount(unsigned long count,
+void AudioNode::setChannelCount(unsigned count,
ExceptionState& exception_state) {
Handler().SetChannelCount(count, exception_state);
}
@@ -1052,7 +1051,7 @@ void AudioNode::setChannelInterpretation(const String& interpretation,
}
const AtomicString& AudioNode::InterfaceName() const {
- return EventTargetNames::AudioNode;
+ return event_target_names::kAudioNode;
}
ExecutionContext* AudioNode::GetExecutionContext() const {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h
index be391ade062..a89174b2969 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h
@@ -32,6 +32,7 @@
#include "third_party/blink/renderer/modules/modules_export.h"
#include "third_party/blink/renderer/platform/audio/audio_bus.h"
#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/heap/persistent.h"
#include "third_party/blink/renderer/platform/wtf/forward.h"
#include "third_party/blink/renderer/platform/wtf/thread_safe_ref_counted.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
@@ -108,10 +109,11 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
// Do not release resources used by an audio rendering thread in dispose().
virtual void Dispose();
- // GetNode() returns a valid object until dispose() is called. This returns
- // nullptr after dispose(). We must not call GetNode() in an audio rendering
- // thread.
+ // GetNode() returns a valid object until the AudioNode is collected on the
+ // main thread, and nullptr thereafter. We must not call GetNode() in an audio
+ // rendering thread.
AudioNode* GetNode() const;
+
// context() returns a valid object until the BaseAudioContext dies, and
// returns nullptr otherwise. This always returns a valid object in an audio
// rendering thread, and inside dispose(). We must not call context() in the
@@ -141,12 +143,12 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
// when process() is called. Subclasses will take this input data and put the
// results in the AudioBus(s) of its AudioNodeOutput(s) (if any).
// Called from context's audio thread.
- virtual void Process(size_t frames_to_process) = 0;
+ virtual void Process(uint32_t frames_to_process) = 0;
// Like process(), but only causes the automations to process; the
// normal processing of the node is bypassed. By default, we assume
// no AudioParams need to be updated.
- virtual void ProcessOnlyAudioParams(size_t frames_to_process){};
+ virtual void ProcessOnlyAudioParams(uint32_t frames_to_process){};
// No significant resources should be allocated until initialize() is called.
// Processing may not occur until a node is initialized.
@@ -171,7 +173,7 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
// will only process once per rendering time quantum even if it's called
// repeatedly. This handles the case of "fanout" where an output is connected
// to multiple AudioNode inputs. Called from context's audio thread.
- void ProcessIfNecessary(size_t frames_to_process);
+ void ProcessIfNecessary(uint32_t frames_to_process);
// Called when a new connection has been made to one of our inputs or the
// connection number of channels has changed. This potentially gives us
@@ -220,8 +222,8 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
void DisableOutputsIfNecessary();
void DisableOutputs();
- unsigned long ChannelCount();
- virtual void SetChannelCount(unsigned long, ExceptionState&);
+ unsigned ChannelCount();
+ virtual void SetChannelCount(unsigned, ExceptionState&);
String GetChannelCountMode();
virtual void SetChannelCountMode(const String&, ExceptionState&);
@@ -253,7 +255,7 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
// connected to us to process. Each rendering quantum, the audio data for
// each of the AudioNode's inputs will be available after this method is
// called. Called from context's audio thread.
- virtual void PullInputs(size_t frames_to_process);
+ virtual void PullInputs(uint32_t frames_to_process);
// Force all inputs to take any channel interpretation changes into account.
void UpdateChannelsForInputs();
@@ -261,14 +263,11 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
private:
void SetNodeType(NodeType);
- volatile bool is_initialized_;
+ bool is_initialized_;
NodeType node_type_;
- // The owner AudioNode. This untraced member is safe because dispose() is
- // called before the AudioNode death, and it clears |node_|. Do not access
- // |node_| directly, use GetNode() instead.
- // See http://crbug.com/404527 for the detail.
- UntracedMember<AudioNode> node_;
+ // The owner AudioNode. Accessed only on the main thread.
+ const WeakPersistent<AudioNode> node_;
// This untraced member is safe because this is cleared for all live
// AudioHandlers when the BaseAudioContext dies. Do not access m_context
@@ -282,7 +281,7 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
double last_processing_time_;
double last_non_silent_time_;
- volatile int connection_ref_count_;
+ int connection_ref_count_;
bool is_disabled_;
@@ -320,7 +319,7 @@ class MODULES_EXPORT AudioNode : public EventTargetWithInlineData {
void Trace(blink::Visitor*) override;
AudioHandler& Handler() const;
- void HandleChannelOptions(const AudioNodeOptions&, ExceptionState&);
+ void HandleChannelOptions(const AudioNodeOptions*, ExceptionState&);
AudioNode* connect(AudioNode*,
unsigned output_index,
@@ -340,8 +339,8 @@ class MODULES_EXPORT AudioNode : public EventTargetWithInlineData {
BaseAudioContext* context() const;
unsigned numberOfInputs() const;
unsigned numberOfOutputs() const;
- unsigned long channelCount() const;
- void setChannelCount(unsigned long, ExceptionState&);
+ unsigned channelCount() const;
+ void setChannelCount(unsigned, ExceptionState&);
String channelCountMode() const;
void setChannelCountMode(const String&, ExceptionState&);
String channelInterpretation() const;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc
index 0e557c93e54..178b1bf0a39 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc
@@ -38,7 +38,7 @@ inline AudioNodeInput::AudioNodeInput(AudioHandler& handler)
handler_(handler) {
// Set to mono by default.
internal_summing_bus_ =
- AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames);
+ AudioBus::Create(1, audio_utilities::kRenderQuantumFrames);
}
std::unique_ptr<AudioNodeInput> AudioNodeInput::Create(AudioHandler& handler) {
@@ -123,7 +123,7 @@ void AudioNodeInput::UpdateInternalBus() {
return;
internal_summing_bus_ = AudioBus::Create(
- number_of_input_channels, AudioUtilities::kRenderQuantumFrames);
+ number_of_input_channels, audio_utilities::kRenderQuantumFrames);
}
unsigned AudioNodeInput::NumberOfChannels() const {
@@ -168,7 +168,7 @@ AudioBus* AudioNodeInput::InternalSummingBus() {
}
void AudioNodeInput::SumAllConnections(AudioBus* summing_bus,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(GetDeferredTaskHandler().IsAudioThread());
// We shouldn't be calling this method if there's only one connection, since
@@ -198,7 +198,7 @@ void AudioNodeInput::SumAllConnections(AudioBus* summing_bus,
}
AudioBus* AudioNodeInput::Pull(AudioBus* in_place_bus,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(GetDeferredTaskHandler().IsAudioThread());
// Handle single connection case.
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h
index 85b5048bcb8..ae8a7b66fcf 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h
@@ -72,7 +72,7 @@ class AudioNodeInput final : public AudioSummingJunction {
// where possible using inPlaceBus. It returns the bus which it rendered
// into, returning inPlaceBus if in-place processing was performed.
// Called from context's audio thread.
- AudioBus* Pull(AudioBus* in_place_bus, size_t frames_to_process);
+ AudioBus* Pull(AudioBus* in_place_bus, uint32_t frames_to_process);
// bus() contains the rendered audio after pull() has been called for each
// time quantum.
@@ -107,7 +107,7 @@ class AudioNodeInput final : public AudioSummingJunction {
// Called from context's audio thread.
AudioBus* InternalSummingBus();
- void SumAllConnections(AudioBus* summing_bus, size_t frames_to_process);
+ void SumAllConnections(AudioBus* summing_bus, uint32_t frames_to_process);
scoped_refptr<AudioBus> internal_summing_bus_;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc
index a90d51e2c62..def68ddfe37 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc
@@ -47,7 +47,7 @@ inline AudioNodeOutput::AudioNodeOutput(AudioHandler* handler,
DCHECK_LE(number_of_channels, BaseAudioContext::MaxNumberOfChannels());
internal_bus_ = AudioBus::Create(number_of_channels,
- AudioUtilities::kRenderQuantumFrames);
+ audio_utilities::kRenderQuantumFrames);
}
std::unique_ptr<AudioNodeOutput> AudioNodeOutput::Create(
@@ -88,7 +88,7 @@ void AudioNodeOutput::UpdateInternalBus() {
return;
internal_bus_ = AudioBus::Create(NumberOfChannels(),
- AudioUtilities::kRenderQuantumFrames);
+ audio_utilities::kRenderQuantumFrames);
}
void AudioNodeOutput::UpdateRenderingState() {
@@ -121,7 +121,7 @@ void AudioNodeOutput::PropagateChannelCount() {
}
AudioBus* AudioNodeOutput::Pull(AudioBus* in_place_bus,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(GetDeferredTaskHandler().IsAudioThread());
DCHECK(rendering_fan_out_count_ > 0 || rendering_param_fan_out_count_ > 0);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h
index dd76a98dd6c..b8d63521d9d 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h
@@ -53,7 +53,7 @@ class AudioNodeOutput final {
// quantum. It returns the bus containing the processed audio for this
// output, returning inPlaceBus if in-place processing was possible. Called
// from context's audio thread.
- AudioBus* Pull(AudioBus* in_place_bus, size_t frames_to_process);
+ AudioBus* Pull(AudioBus* in_place_bus, uint32_t frames_to_process);
// bus() will contain the rendered audio after pull() is called for each
// rendering time quantum.
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc
index 24f4820c89b..70af68e84cc 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc
@@ -54,7 +54,7 @@ AudioParamHandler::AudioParamHandler(BaseAudioContext& context,
min_value_(min_value),
max_value_(max_value),
summing_bus_(
- AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames, false)) {
+ AudioBus::Create(1, audio_utilities::kRenderQuantumFrames, false)) {
// The destination MUST exist because we need the destination handler for the
// AudioParam.
CHECK(context.destination());
@@ -264,7 +264,7 @@ void AudioParamHandler::CalculateFinalValues(float* values,
// together (unity-gain summing junction). Note that connections would
// normally be mono, but we mix down to mono if necessary.
if (NumberOfRenderingConnections() > 0) {
- DCHECK_LE(number_of_values, AudioUtilities::kRenderQuantumFrames);
+ DCHECK_LE(number_of_values, audio_utilities::kRenderQuantumFrames);
summing_bus_->SetChannelMemory(0, values, number_of_values);
@@ -274,7 +274,7 @@ void AudioParamHandler::CalculateFinalValues(float* values,
// Render audio from this output.
AudioBus* connection_bus =
- output->Pull(nullptr, AudioUtilities::kRenderQuantumFrames);
+ output->Pull(nullptr, audio_utilities::kRenderQuantumFrames);
// Sum, with unity-gain.
summing_bus_->SumFrom(*connection_bus);
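
The summing-junction comments above describe unity-gain mixing: every audio-rate connection is pulled and its samples are simply added into the parameter's summing bus. A hedged sketch of the same idea with plain float buffers (std::vector rather than AudioBus; names are illustrative only):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Add each source buffer into |sum|, sample by sample, with unity gain.
    void SumWithUnityGain(std::vector<float>& sum,
                          const std::vector<std::vector<float>>& sources) {
      for (const auto& source : sources) {
        const size_t frames = std::min(sum.size(), source.size());
        for (size_t i = 0; i < frames; ++i)
          sum[i] += source[i];
      }
    }

Per the comment in the diff, the real code also mixes multi-channel connections down to mono before summing; this sketch omits that step.
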
@@ -286,7 +286,7 @@ void AudioParamHandler::CalculateTimelineValues(float* values,
unsigned number_of_values) {
// Calculate values for this render quantum. Normally
// |numberOfValues| will be equal to
- // AudioUtilities::kRenderQuantumFrames (the render quantum size).
+ // audio_utilities::kRenderQuantumFrames (the render quantum size).
double sample_rate = DestinationHandler().SampleRate();
size_t start_frame = DestinationHandler().CurrentSampleFrame();
size_t end_frame = start_frame + number_of_values;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h
index 052b53b37e4..8d16542493c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h
@@ -26,7 +26,7 @@ class AudioParamMap final : public ScriptWrappable,
const HeapHashMap<String, Member<AudioParam>>& parameter_map);
// IDL attributes / methods
- size_t size() const { return parameter_map_.size(); }
+ uint32_t size() const { return parameter_map_.size(); }
AudioParam* At(String name) { return parameter_map_.at(name); }
bool Contains(String name) { return parameter_map_.Contains(name); }
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc
index 139b8c50bc2..cd7ad2e7b64 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc
@@ -642,7 +642,7 @@ bool AudioParamTimeline::HasValues(size_t current_frame,
// Need automation if the event starts somewhere before the
// end of the current render quantum.
return events_[0]->Time() <=
- (current_frame + AudioUtilities::kRenderQuantumFrames) /
+ (current_frame + audio_utilities::kRenderQuantumFrames) /
sample_rate;
default:
// Otherwise, there's some kind of other event running, so we
@@ -675,7 +675,7 @@ void AudioParamTimeline::CancelScheduledValues(
MutexLocker locker(events_lock_);
// Remove all events starting at startTime.
- for (unsigned i = 0; i < events_.size(); ++i) {
+ for (wtf_size_t i = 0; i < events_.size(); ++i) {
if (events_[i]->Time() >= start_time) {
RemoveCancelledEvents(i);
break;
@@ -692,7 +692,7 @@ void AudioParamTimeline::CancelAndHoldAtTime(double cancel_time,
MutexLocker locker(events_lock_);
- unsigned i;
+ wtf_size_t i;
// Find the first event at or just past cancelTime.
for (i = 0; i < events_.size(); ++i) {
if (events_[i]->Time() > cancel_time) {
@@ -702,7 +702,7 @@ void AudioParamTimeline::CancelAndHoldAtTime(double cancel_time,
// The event that is being cancelled. This is the event just past
// cancelTime, if any.
- unsigned cancelled_event_index = i;
+ wtf_size_t cancelled_event_index = i;
// If the event just before cancelTime is a SetTarget or SetValueCurve
// event, we need to handle that event specially instead of the event after.
@@ -821,7 +821,7 @@ float AudioParamTimeline::ValueForContextTime(
double sample_rate = audio_destination.SampleRate();
size_t start_frame = audio_destination.CurrentSampleFrame();
// One parameter change per render quantum.
- double control_rate = sample_rate / AudioUtilities::kRenderQuantumFrames;
+ double control_rate = sample_rate / audio_utilities::kRenderQuantumFrames;
value =
ValuesForFrameRange(start_frame, start_frame + 1, default_value, &value,
1, sample_rate, control_rate, min_value, max_value);
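
The control rate used here is one parameter value per render quantum, i.e. sample_rate / kRenderQuantumFrames. Assuming the usual 128-frame Web Audio render quantum, a 48 kHz context gives 375 control-rate updates per second:

    constexpr double kRenderQuantumFrames = 128;  // assumed render quantum size
    constexpr double kSampleRate = 48000.0;       // example sample rate
    constexpr double kControlRate =
        kSampleRate / kRenderQuantumFrames;       // 375 updates per second
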
@@ -854,8 +854,8 @@ float AudioParamTimeline::ValuesForFrameRange(size_t start_frame,
number_of_values, sample_rate, control_rate);
// Clamp the values now to the nominal range
- VectorMath::Vclip(values, 1, &min_value, &max_value, values, 1,
- number_of_values);
+ vector_math::Vclip(values, 1, &min_value, &max_value, values, 1,
+ number_of_values);
return last_value;
}
@@ -1212,7 +1212,7 @@ bool AudioParamTimeline::HandleAllEventsInThePast(double current_time,
// the curve, so we don't need to worry that SetValueCurve time is a
// start time, not an end time.
if (last_event_time +
- 1.5 * AudioUtilities::kRenderQuantumFrames / sample_rate <
+ 1.5 * audio_utilities::kRenderQuantumFrames / sample_rate <
current_time) {
// If the last event is SetTarget, make sure we've converged and that
// we're at least 5 time constants past the start of the event. If not, we
@@ -1291,7 +1291,7 @@ void AudioParamTimeline::ProcessSetTargetFollowedByRamp(
// SetTarget has already started. Update |value| one frame because it's
// the value from the previous frame.
float discrete_time_constant =
- static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
+ static_cast<float>(audio_utilities::DiscreteTimeConstantForSampleRate(
event->TimeConstant(), control_rate));
value += (event->Value() - value) * discrete_time_constant;
}
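
The SetTarget handling above converges exponentially toward the target, one step at a time: value += (target - value) * k. A self-contained sketch of that update is below; the formula used for k, 1 - exp(-1 / (rate * tau)), is the conventional discrete time constant and is an assumption here, not a quotation of audio_utilities.

    #include <cmath>
    #include <cstdio>

    // Conventional discrete time constant for an exponential approach that is
    // updated |rate| times per second with time constant |tau| seconds.
    double DiscreteTimeConstant(double tau, double rate) {
      return 1.0 - std::exp(-1.0 / (rate * tau));
    }

    int main() {
      const double rate = 375.0;  // e.g. one update per 128-frame quantum at 48 kHz
      const double k = DiscreteTimeConstant(0.1, rate);  // tau = 100 ms
      double value = 0.0;
      const double target = 1.0;
      for (int step = 0; step < 10; ++step)
        value += (target - value) * k;  // same update form as the code above
      std::printf("value after 10 steps: %f\n", value);
    }
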
@@ -1560,7 +1560,7 @@ std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessSetTarget(
float target = value1;
float time_constant = event->TimeConstant();
float discrete_time_constant =
- static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
+ static_cast<float>(audio_utilities::DiscreteTimeConstantForSampleRate(
time_constant, control_rate));
// Set the starting value correctly. This is only needed when the
@@ -1864,8 +1864,8 @@ std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessCancelValues(
float target = events_[event_index - 1]->Value();
float time_constant = events_[event_index - 1]->TimeConstant();
float discrete_time_constant = static_cast<float>(
- AudioUtilities::DiscreteTimeConstantForSampleRate(time_constant,
- control_rate));
+ audio_utilities::DiscreteTimeConstantForSampleRate(time_constant,
+ control_rate));
value += (target - value) * discrete_time_constant;
}
}
@@ -1880,11 +1880,11 @@ std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessCancelValues(
return std::make_tuple(current_frame, value, write_index);
}
-unsigned AudioParamTimeline::FillWithDefault(float* values,
+uint32_t AudioParamTimeline::FillWithDefault(float* values,
float default_value,
- size_t end_frame,
- unsigned write_index) {
- size_t index = write_index;
+ uint32_t end_frame,
+ uint32_t write_index) {
+ uint32_t index = write_index;
for (; index < end_frame; ++index)
values[index] = default_value;
@@ -1892,11 +1892,12 @@ unsigned AudioParamTimeline::FillWithDefault(float* values,
return index;
}
-void AudioParamTimeline::RemoveCancelledEvents(size_t first_event_to_remove) {
+void AudioParamTimeline::RemoveCancelledEvents(
+ wtf_size_t first_event_to_remove) {
// For all the events that are being removed, also remove that event
// from |new_events_|.
if (new_events_.size() > 0) {
- for (size_t k = first_event_to_remove; k < events_.size(); ++k) {
+ for (wtf_size_t k = first_event_to_remove; k < events_.size(); ++k) {
new_events_.erase(events_[k].get());
}
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h
index e0cf3cd4825..34fe05fb890 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h
@@ -440,14 +440,14 @@ class AudioParamTimeline {
// Fill the output vector |values| with the value |defaultValue|,
// starting at |writeIndex| and continuing up to |endFrame|
// (exclusive). |writeIndex| is updated with the new index.
- unsigned FillWithDefault(float* values,
+ uint32_t FillWithDefault(float* values,
float default_value,
- size_t end_frame,
- unsigned write_index);
+ uint32_t end_frame,
+ uint32_t write_index);
// When cancelling events, remove the items from |events_| starting
// at the given index. Update |new_events_| too.
- void RemoveCancelledEvents(size_t first_event_to_remove);
+ void RemoveCancelledEvents(wtf_size_t first_event_to_remove);
// Vector of all automation events for the AudioParam. Access must
// be locked via m_eventsLock.
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc
index b580ddffe2a..c5629a61be9 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc
@@ -42,7 +42,7 @@ AudioProcessingEvent* AudioProcessingEvent::Create(AudioBuffer* input_buffer,
AudioProcessingEvent* AudioProcessingEvent::Create(
const AtomicString& type,
- const AudioProcessingEventInit& initializer) {
+ const AudioProcessingEventInit* initializer) {
return new AudioProcessingEvent(type, initializer);
}
@@ -51,24 +51,24 @@ AudioProcessingEvent::AudioProcessingEvent() = default;
AudioProcessingEvent::AudioProcessingEvent(AudioBuffer* input_buffer,
AudioBuffer* output_buffer,
double playback_time)
- : Event(EventTypeNames::audioprocess, Bubbles::kYes, Cancelable::kNo),
+ : Event(event_type_names::kAudioprocess, Bubbles::kYes, Cancelable::kNo),
input_buffer_(input_buffer),
output_buffer_(output_buffer),
playback_time_(playback_time) {}
AudioProcessingEvent::AudioProcessingEvent(
const AtomicString& type,
- const AudioProcessingEventInit& initializer)
+ const AudioProcessingEventInit* initializer)
: Event(type, initializer) {
- input_buffer_ = initializer.inputBuffer();
- output_buffer_ = initializer.outputBuffer();
- playback_time_ = initializer.playbackTime();
+ input_buffer_ = initializer->inputBuffer();
+ output_buffer_ = initializer->outputBuffer();
+ playback_time_ = initializer->playbackTime();
}
AudioProcessingEvent::~AudioProcessingEvent() = default;
const AtomicString& AudioProcessingEvent::InterfaceName() const {
- return EventNames::AudioProcessingEvent;
+ return event_interface_names::kAudioProcessingEvent;
}
void AudioProcessingEvent::Trace(blink::Visitor* visitor) {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h
index 4c31db8f0d0..705a3d448bb 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h
@@ -46,7 +46,7 @@ class AudioProcessingEvent final : public Event {
double playback_time);
static AudioProcessingEvent* Create(const AtomicString& type,
- const AudioProcessingEventInit&);
+ const AudioProcessingEventInit*);
~AudioProcessingEvent() override;
@@ -64,7 +64,7 @@ class AudioProcessingEvent final : public Event {
AudioBuffer* output_buffer,
double playback_time);
AudioProcessingEvent(const AtomicString& type,
- const AudioProcessingEventInit&);
+ const AudioProcessingEventInit*);
Member<AudioBuffer> input_buffer_;
Member<AudioBuffer> output_buffer_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc
index 4c6c0e4baa3..767cd033749 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc
@@ -34,7 +34,7 @@
#include "third_party/blink/renderer/platform/bindings/exception_messages.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
-#include "third_party/blink/renderer/platform/web_task_runner.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/math_extras.h"
namespace blink {
@@ -69,8 +69,8 @@ AudioScheduledSourceHandler::UpdateSchedulingInfo(size_t quantum_frame_size,
}
DCHECK_EQ(quantum_frame_size,
- static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
- if (quantum_frame_size != AudioUtilities::kRenderQuantumFrames) {
+ static_cast<size_t>(audio_utilities::kRenderQuantumFrames));
+ if (quantum_frame_size != audio_utilities::kRenderQuantumFrames) {
return std::make_tuple(quantum_frame_offset, non_silent_frames_to_process,
start_frame_offset);
}
@@ -84,11 +84,11 @@ AudioScheduledSourceHandler::UpdateSchedulingInfo(size_t quantum_frame_size,
size_t quantum_start_frame = Context()->CurrentSampleFrame();
size_t quantum_end_frame = quantum_start_frame + quantum_frame_size;
size_t start_frame =
- AudioUtilities::TimeToSampleFrame(start_time_, sample_rate);
+ audio_utilities::TimeToSampleFrame(start_time_, sample_rate);
size_t end_frame =
end_time_ == kUnknownTime
? 0
- : AudioUtilities::TimeToSampleFrame(end_time_, sample_rate);
+ : audio_utilities::TimeToSampleFrame(end_time_, sample_rate);
// If we know the end time and it's already passed, then don't bother doing
// any more rendering this cycle.
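
audio_utilities::TimeToSampleFrame converts the scheduled start/stop times into sample-frame indices so they can be compared against the quantum's frame range. Conceptually this is the time multiplied by the sample rate and rounded to an integer frame; the exact rounding mode Blink applies is not shown in this hunk, so round-to-nearest below is an assumption.

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // Convert a time in seconds to a sample-frame index (rounding assumed).
    size_t TimeToSampleFrame(double time_seconds, double sample_rate) {
      return static_cast<size_t>(std::round(time_seconds * sample_rate));
    }

    int main() {
      std::printf("%zu\n", TimeToSampleFrame(0.25, 44100.0));  // 11025
    }
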
@@ -252,7 +252,7 @@ void AudioScheduledSourceHandler::NotifyEnded() {
if (!Context() || !Context()->GetExecutionContext())
return;
if (GetNode())
- GetNode()->DispatchEvent(*Event::Create(EventTypeNames::ended));
+ GetNode()->DispatchEvent(*Event::Create(event_type_names::kEnded));
}
// ----------------------------------------------------------------
@@ -284,11 +284,11 @@ void AudioScheduledSourceNode::stop(double when,
}
EventListener* AudioScheduledSourceNode::onended() {
- return GetAttributeEventListener(EventTypeNames::ended);
+ return GetAttributeEventListener(event_type_names::kEnded);
}
void AudioScheduledSourceNode::setOnended(EventListener* listener) {
- SetAttributeEventListener(EventTypeNames::ended, listener);
+ SetAttributeEventListener(event_type_names::kEnded, listener);
}
bool AudioScheduledSourceNode::HasPendingActivity() const {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc
index 718ef75dbb8..b5f727fb2d5 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc
@@ -110,7 +110,7 @@ void AudioWorkletGlobalScope::registerProcessor(
// of |AudioParamDescriptor| and pass it to the definition.
if (did_get_parameter_descriptor &&
!parameter_descriptors_value_local->IsNullOrUndefined()) {
- HeapVector<AudioParamDescriptor> audio_param_descriptors =
+ HeapVector<Member<AudioParamDescriptor>> audio_param_descriptors =
NativeValueTraits<IDLSequence<AudioParamDescriptor>>::NativeValue(
isolate, parameter_descriptors_value_local, exception_state);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h
index ce29470c280..d6154430dff 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h
@@ -12,6 +12,7 @@
#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
#include "third_party/blink/renderer/platform/audio/audio_array.h"
#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/wtf/casting.h"
namespace blink {
@@ -126,11 +127,12 @@ class MODULES_EXPORT AudioWorkletGlobalScope final : public WorkletGlobalScope {
float sample_rate_ = 0.0;
};
-DEFINE_TYPE_CASTS(AudioWorkletGlobalScope,
- ExecutionContext,
- context,
- context->IsAudioWorkletGlobalScope(),
- context.IsAudioWorkletGlobalScope());
+template <>
+struct DowncastTraits<AudioWorkletGlobalScope> {
+ static bool AllowFrom(const ExecutionContext& context) {
+ return context.IsAudioWorkletGlobalScope();
+ }
+};
} // namespace blink
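
The DEFINE_TYPE_CASTS macro is replaced by a DowncastTraits specialization, which lets the generic To<T>() helper in platform/wtf/casting.h ask AllowFrom() whether the downcast is legal. A stripped-down sketch of how such a traits-gated cast can work, using plain C++ stand-ins rather than the real WTF machinery:

    #include <cassert>

    template <typename T>
    struct DowncastTraits;

    // Simplified stand-in for WTF's To<T>(): checks the traits predicate, then
    // performs the static downcast.
    template <typename Derived, typename Base>
    Derived* To(Base* base) {
      assert(!base || DowncastTraits<Derived>::AllowFrom(*base));
      return static_cast<Derived*>(base);
    }

    struct ExecutionContext {
      virtual ~ExecutionContext() = default;
      virtual bool IsAudioWorkletGlobalScope() const { return false; }
    };

    struct AudioWorkletGlobalScope : ExecutionContext {
      bool IsAudioWorkletGlobalScope() const override { return true; }
    };

    template <>
    struct DowncastTraits<AudioWorkletGlobalScope> {
      static bool AllowFrom(const ExecutionContext& context) {
        return context.IsAudioWorkletGlobalScope();
      }
    };

    int main() {
      AudioWorkletGlobalScope scope;
      ExecutionContext* context = &scope;
      assert(To<AudioWorkletGlobalScope>(context) == &scope);
    }
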
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc
index 80b166f42dd..c60b0414d34 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc
@@ -21,6 +21,7 @@
#include "third_party/blink/renderer/bindings/core/v8/v8_gc_controller.h"
#include "third_party/blink/renderer/bindings/core/v8/worker_or_worklet_script_controller.h"
#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/inspector/worker_devtools_params.h"
#include "third_party/blink/renderer/core/messaging/message_channel.h"
#include "third_party/blink/renderer/core/messaging/message_port.h"
#include "third_party/blink/renderer/core/origin_trials/origin_trial_context.h"
@@ -28,7 +29,6 @@
#include "third_party/blink/renderer/core/testing/page_test_base.h"
#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
#include "third_party/blink/renderer/core/workers/worker_backing_thread.h"
-#include "third_party/blink/renderer/core/workers/worker_inspector_proxy.h"
#include "third_party/blink/renderer/core/workers/worker_reporting_proxy.h"
#include "third_party/blink/renderer/core/workers/worklet_module_responses_map.h"
#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
@@ -39,7 +39,6 @@
#include "third_party/blink/renderer/platform/bindings/script_state.h"
#include "third_party/blink/renderer/platform/bindings/v8_binding_macros.h"
#include "third_party/blink/renderer/platform/bindings/v8_object_constructor.h"
-#include "third_party/blink/renderer/platform/loader/fetch/access_control_status.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h"
#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
#include "third_party/blink/renderer/platform/wtf/text/text_position.h"
@@ -69,15 +68,15 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
Document* document = &GetDocument();
thread->Start(
std::make_unique<GlobalScopeCreationParams>(
- document->Url(), ScriptType::kModule, document->UserAgent(),
- Vector<CSPHeaderAndType>(), document->GetReferrerPolicy(),
- document->GetSecurityOrigin(), document->IsSecureContext(),
- document->GetHttpsState(), nullptr /* worker_clients */,
- document->AddressSpace(),
+ document->Url(), mojom::ScriptType::kModule, document->UserAgent(),
+ nullptr /* web_worker_fetch_context */, Vector<CSPHeaderAndType>(),
+ document->GetReferrerPolicy(), document->GetSecurityOrigin(),
+ document->IsSecureContext(), document->GetHttpsState(),
+ nullptr /* worker_clients */, document->AddressSpace(),
OriginTrialContext::GetTokens(document).get(),
base::UnguessableToken::Create(), nullptr /* worker_settings */,
kV8CacheOptionsDefault, new WorkletModuleResponsesMap),
- base::nullopt, WorkerInspectorProxy::PauseOnWorkerStart::kDontPause,
+ base::nullopt, std::make_unique<WorkerDevToolsParams>(),
ParentExecutionContextTaskRunners::Create());
return thread;
}
@@ -137,8 +136,8 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
KURL js_url("https://example.com/worklet.js");
ScriptModule module = ScriptModule::Compile(
script_state->GetIsolate(), source_code, js_url, js_url,
- ScriptFetchOptions(), kSharableCrossOrigin,
- TextPosition::MinimumPosition(), ASSERT_NO_EXCEPTION);
+ ScriptFetchOptions(), TextPosition::MinimumPosition(),
+ ASSERT_NO_EXCEPTION);
EXPECT_FALSE(module.IsNull());
ScriptValue exception = module.Instantiate(script_state);
EXPECT_TRUE(exception.IsEmpty());
@@ -154,7 +153,7 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
WaitableEvent* wait_event) {
EXPECT_TRUE(thread->IsCurrentThread());
- auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ auto* global_scope = To<AudioWorkletGlobalScope>(thread->GlobalScope());
ScriptState* script_state =
global_scope->ScriptController()->GetScriptState();
@@ -202,7 +201,7 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
WaitableEvent* wait_event) {
EXPECT_TRUE(thread->IsCurrentThread());
- auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ auto* global_scope = To<AudioWorkletGlobalScope>(thread->GlobalScope());
ScriptState* script_state =
global_scope->ScriptController()->GetScriptState();
@@ -257,7 +256,7 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
WaitableEvent* wait_event) {
EXPECT_TRUE(thread->IsCurrentThread());
- auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ auto* global_scope = To<AudioWorkletGlobalScope>(thread->GlobalScope());
ScriptState* script_state =
global_scope->ScriptController()->GetScriptState();
@@ -323,7 +322,7 @@ class AudioWorkletGlobalScopeTest : public PageTestBase {
WaitableEvent* wait_event) {
EXPECT_TRUE(thread->IsCurrentThread());
- auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ auto* global_scope = To<AudioWorkletGlobalScope>(thread->GlobalScope());
ScriptState* script_state =
global_scope->ScriptController()->GetScriptState();
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc
index b9d26b3fa31..63750f23866 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc
@@ -4,6 +4,8 @@
#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include <utility>
+
#include "third_party/blink/public/platform/task_type.h"
#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
#include "third_party/blink/renderer/core/messaging/message_port.h"
@@ -47,9 +49,9 @@ void AudioWorkletMessagingProxy::CreateProcessorOnRenderingThread(
scoped_refptr<SerializedScriptValue> node_options) {
DCHECK(worker_thread->IsCurrentThread());
AudioWorkletGlobalScope* global_scope =
- ToAudioWorkletGlobalScope(worker_thread->GlobalScope());
- AudioWorkletProcessor* processor =
- global_scope->CreateProcessor(name, message_port_channel, node_options);
+ To<AudioWorkletGlobalScope>(worker_thread->GlobalScope());
+ AudioWorkletProcessor* processor = global_scope->CreateProcessor(
+ name, message_port_channel, std::move(node_options));
handler->SetProcessorOnRenderThread(processor);
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc
index cee79bc7253..055cb2b54a2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc
@@ -22,6 +22,7 @@
#include "third_party/blink/renderer/platform/bindings/exception_messages.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/heap/persistent.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
namespace blink {
@@ -30,32 +31,30 @@ AudioWorkletHandler::AudioWorkletHandler(
float sample_rate,
String name,
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
- const AudioWorkletNodeOptions& options)
+ const AudioWorkletNodeOptions* options)
: AudioHandler(kNodeTypeAudioWorklet, node, sample_rate),
name_(name),
param_handler_map_(param_handler_map) {
DCHECK(IsMainThread());
for (const auto& param_name : param_handler_map_.Keys()) {
- param_value_map_.Set(
- param_name,
- std::make_unique<AudioFloatArray>(
- AudioUtilities::kRenderQuantumFrames));
+ param_value_map_.Set(param_name,
+ std::make_unique<AudioFloatArray>(
+ audio_utilities::kRenderQuantumFrames));
}
- for (unsigned i = 0; i < options.numberOfInputs(); ++i) {
+ for (unsigned i = 0; i < options->numberOfInputs(); ++i) {
AddInput();
}
- if (options.hasOutputChannelCount()) {
+ if (options->hasOutputChannelCount()) {
is_output_channel_count_given_ = true;
}
- for (unsigned i = 0; i < options.numberOfOutputs(); ++i) {
- // If |options.outputChannelCount| unspecified, all outputs are mono.
- AddOutput(is_output_channel_count_given_
- ? options.outputChannelCount()[i]
- : 1);
+ for (unsigned i = 0; i < options->numberOfOutputs(); ++i) {
+    // If |options->outputChannelCount| is unspecified, all outputs are mono.
+ AddOutput(is_output_channel_count_given_ ? options->outputChannelCount()[i]
+ : 1);
}
if (Context()->GetExecutionContext()) {
@@ -78,12 +77,12 @@ scoped_refptr<AudioWorkletHandler> AudioWorkletHandler::Create(
float sample_rate,
String name,
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
- const AudioWorkletNodeOptions& options) {
+ const AudioWorkletNodeOptions* options) {
return base::AdoptRef(new AudioWorkletHandler(node, sample_rate, name,
param_handler_map, options));
}
-void AudioWorkletHandler::Process(size_t frames_to_process) {
+void AudioWorkletHandler::Process(uint32_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
// Render and update the node state when the processor is ready with no error.
@@ -106,7 +105,7 @@ void AudioWorkletHandler::Process(size_t frames_to_process) {
AudioFloatArray* param_values = param_value_map_.at(param_name);
if (param_handler->HasSampleAccurateValues()) {
param_handler->CalculateSampleAccurateValues(
- param_values->Data(), frames_to_process);
+ param_values->Data(), static_cast<uint32_t>(frames_to_process));
} else {
std::fill(param_values->Data(),
param_values->Data() + frames_to_process,
@@ -218,11 +217,10 @@ void AudioWorkletHandler::NotifyProcessorError(
AudioWorkletNode::AudioWorkletNode(
BaseAudioContext& context,
const String& name,
- const AudioWorkletNodeOptions& options,
+ const AudioWorkletNodeOptions* options,
const Vector<CrossThreadAudioParamInfo> param_info_list,
MessagePort* node_port)
- : AudioNode(context),
- node_port_(node_port) {
+ : AudioNode(context), node_port_(node_port) {
HeapHashMap<String, Member<AudioParam>> audio_param_map;
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map;
for (const auto& param_info : param_info_list) {
@@ -237,8 +235,8 @@ AudioWorkletNode::AudioWorkletNode(
audio_param_map.Set(param_name, audio_param);
param_handler_map.Set(param_name, WrapRefCounted(&audio_param->Handler()));
- if (options.hasParameterData()) {
- for (const auto& key_value_pair : options.parameterData()) {
+ if (options->hasParameterData()) {
+ for (const auto& key_value_pair : options->parameterData()) {
if (key_value_pair.first == param_name)
audio_param->setValue(key_value_pair.second);
}
@@ -257,7 +255,7 @@ AudioWorkletNode* AudioWorkletNode::Create(
ScriptState* script_state,
BaseAudioContext* context,
const String& name,
- const AudioWorkletNodeOptions& options,
+ const AudioWorkletNodeOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -266,7 +264,7 @@ AudioWorkletNode* AudioWorkletNode::Create(
return nullptr;
}
- if (options.numberOfInputs() == 0 && options.numberOfOutputs() == 0) {
+ if (options->numberOfInputs() == 0 && options->numberOfOutputs() == 0) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
"AudioWorkletNode cannot be created: Number of inputs and number of "
@@ -274,19 +272,19 @@ AudioWorkletNode* AudioWorkletNode::Create(
return nullptr;
}
- if (options.hasOutputChannelCount()) {
- if (options.numberOfOutputs() != options.outputChannelCount().size()) {
+ if (options->hasOutputChannelCount()) {
+ if (options->numberOfOutputs() != options->outputChannelCount().size()) {
exception_state.ThrowDOMException(
DOMExceptionCode::kIndexSizeError,
"AudioWorkletNode cannot be created: Length of specified "
"'outputChannelCount' (" +
- String::Number(options.outputChannelCount().size()) +
+ String::Number(options->outputChannelCount().size()) +
") does not match the given number of outputs (" +
- String::Number(options.numberOfOutputs()) + ").");
+ String::Number(options->numberOfOutputs()) + ").");
return nullptr;
}
- for (const auto& channel_count : options.outputChannelCount()) {
+ for (const auto& channel_count : options->outputChannelCount()) {
if (channel_count < 1 ||
channel_count > BaseAudioContext::MaxNumberOfChannels()) {
exception_state.ThrowDOMException(
@@ -384,7 +382,7 @@ MessagePort* AudioWorkletNode::port() const {
}
void AudioWorkletNode::FireProcessorError() {
- DispatchEvent(*Event::Create(EventTypeNames::processorerror));
+ DispatchEvent(*Event::Create(event_type_names::kProcessorerror));
}
scoped_refptr<AudioWorkletHandler> AudioWorkletNode::GetWorkletHandler() const {
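Throughout this change, Web IDL dictionary arguments such as AudioWorkletNodeOptions move from const references to garbage-collected pointers, so member access switches from '.' to '->'. A minimal sketch of the resulting validation pattern, using a hypothetical helper name and only the dictionary accessors visible in the hunks above:

// Sketch only: |options| is now a dictionary pointer, hence the '->' access.
void ValidateOutputChannelCount(const AudioWorkletNodeOptions* options,
                                ExceptionState& exception_state) {
  if (!options->hasOutputChannelCount())
    return;
  if (options->numberOfOutputs() != options->outputChannelCount().size()) {
    exception_state.ThrowDOMException(
        DOMExceptionCode::kIndexSizeError,
        "Length of 'outputChannelCount' does not match numberOfOutputs.");
  }
}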
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h
index 058202c2392..5a67d491922 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h
@@ -38,12 +38,12 @@ class AudioWorkletHandler final : public AudioHandler {
float sample_rate,
String name,
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
- const AudioWorkletNodeOptions&);
+ const AudioWorkletNodeOptions*);
~AudioWorkletHandler() override;
// Called from render thread.
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
void CheckNumberOfChannelsForInput(AudioNodeInput*) override;
@@ -68,7 +68,7 @@ class AudioWorkletHandler final : public AudioHandler {
float sample_rate,
String name,
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
- const AudioWorkletNodeOptions&);
+ const AudioWorkletNodeOptions*);
String name_;
@@ -99,7 +99,7 @@ class AudioWorkletNode final : public AudioNode,
static AudioWorkletNode* Create(ScriptState*,
BaseAudioContext*,
const String& name,
- const AudioWorkletNodeOptions&,
+ const AudioWorkletNodeOptions*,
ExceptionState&);
// ActiveScriptWrappable
@@ -108,7 +108,7 @@ class AudioWorkletNode final : public AudioNode,
// IDL
AudioParamMap* parameters() const;
MessagePort* port() const;
- DEFINE_ATTRIBUTE_EVENT_LISTENER(processorerror);
+ DEFINE_ATTRIBUTE_EVENT_LISTENER(processorerror, kProcessorerror);
void FireProcessorError();
@@ -117,7 +117,7 @@ class AudioWorkletNode final : public AudioNode,
private:
AudioWorkletNode(BaseAudioContext&,
const String& name,
- const AudioWorkletNodeOptions&,
+ const AudioWorkletNodeOptions*,
const Vector<CrossThreadAudioParamInfo>,
MessagePort* node_port);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc
index 04ee72e61eb..e9567d27c56 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc
@@ -24,7 +24,7 @@ AudioWorkletObjectProxy::AudioWorkletObjectProxy(
void AudioWorkletObjectProxy::DidCreateWorkerGlobalScope(
WorkerOrWorkletGlobalScope* global_scope) {
- global_scope_ = ToAudioWorkletGlobalScope(global_scope);
+ global_scope_ = To<AudioWorkletGlobalScope>(global_scope);
global_scope_->SetSampleRate(context_sample_rate_);
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc
index a197e21a6b8..67fa0edf9ce 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc
@@ -13,7 +13,7 @@ namespace blink {
AudioWorkletProcessor* AudioWorkletProcessor::Create(
ExecutionContext* context) {
- AudioWorkletGlobalScope* global_scope = ToAudioWorkletGlobalScope(context);
+ AudioWorkletGlobalScope* global_scope = To<AudioWorkletGlobalScope>(context);
DCHECK(global_scope);
DCHECK(global_scope->IsContextThread());
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc
index b3cc204dc0c..80baa713901 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc
@@ -40,15 +40,15 @@ v8::Local<v8::Function> AudioWorkletProcessorDefinition::ProcessLocal(
}
void AudioWorkletProcessorDefinition::SetAudioParamDescriptors(
- const HeapVector<AudioParamDescriptor>& descriptors) {
+ const HeapVector<Member<AudioParamDescriptor>>& descriptors) {
audio_param_descriptors_ = descriptors;
}
const Vector<String>
AudioWorkletProcessorDefinition::GetAudioParamDescriptorNames() const {
Vector<String> names;
- for (const auto& descriptor : audio_param_descriptors_) {
- names.push_back(descriptor.name());
+ for (const auto descriptor : audio_param_descriptors_) {
+ names.push_back(descriptor->name());
}
return names;
}
@@ -56,9 +56,9 @@ const Vector<String>
const AudioParamDescriptor*
AudioWorkletProcessorDefinition::GetAudioParamDescriptor(
const String& key) const {
- for (const auto& descriptor : audio_param_descriptors_) {
- if (descriptor.name() == key)
- return &descriptor;
+ for (const auto descriptor : audio_param_descriptors_) {
+ if (descriptor->name() == key)
+ return descriptor;
}
return nullptr;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h
index b5100163ee7..d54fdb20ed1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h
@@ -37,7 +37,8 @@ class MODULES_EXPORT AudioWorkletProcessorDefinition final
const String& GetName() const { return name_; }
v8::Local<v8::Object> ConstructorLocal(v8::Isolate*);
v8::Local<v8::Function> ProcessLocal(v8::Isolate*);
- void SetAudioParamDescriptors(const HeapVector<AudioParamDescriptor>&);
+ void SetAudioParamDescriptors(
+ const HeapVector<Member<AudioParamDescriptor>>&);
const Vector<String> GetAudioParamDescriptorNames() const;
const AudioParamDescriptor* GetAudioParamDescriptor(const String& key) const;
@@ -70,7 +71,7 @@ class MODULES_EXPORT AudioWorkletProcessorDefinition final
TraceWrapperV8Reference<v8::Object> constructor_;
TraceWrapperV8Reference<v8::Function> process_;
- HeapVector<AudioParamDescriptor> audio_param_descriptors_;
+ HeapVector<Member<AudioParamDescriptor>> audio_param_descriptors_;
};
} // namespace blink
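With the descriptor list now held as HeapVector<Member<AudioParamDescriptor>>, each element is a traced handle to a garbage-collected object and is dereferenced with '->'. A small sketch of the lookup pattern used above, written as a free function with a hypothetical name:

// Sketch only: Member<T> implicitly converts to T*, so the matching
// descriptor can be returned directly as a raw pointer.
const AudioParamDescriptor* FindDescriptorByName(
    const HeapVector<Member<AudioParamDescriptor>>& descriptors,
    const String& key) {
  for (const auto& descriptor : descriptors) {
    if (descriptor->name() == key)
      return descriptor;
  }
  return nullptr;
}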
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc
index 076dbbfff1b..97250df4c50 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc
@@ -15,18 +15,17 @@
#include "third_party/blink/renderer/bindings/core/v8/v8_gc_controller.h"
#include "third_party/blink/renderer/bindings/core/v8/worker_or_worklet_script_controller.h"
#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/core/inspector/worker_devtools_params.h"
#include "third_party/blink/renderer/core/origin_trials/origin_trial_context.h"
#include "third_party/blink/renderer/core/script/script.h"
#include "third_party/blink/renderer/core/testing/page_test_base.h"
#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
#include "third_party/blink/renderer/core/workers/worker_backing_thread.h"
-#include "third_party/blink/renderer/core/workers/worker_inspector_proxy.h"
#include "third_party/blink/renderer/core/workers/worker_or_worklet_global_scope.h"
#include "third_party/blink/renderer/core/workers/worker_reporting_proxy.h"
#include "third_party/blink/renderer/core/workers/worklet_module_responses_map.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
-#include "third_party/blink/renderer/platform/loader/fetch/access_control_status.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h"
#include "third_party/blink/renderer/platform/testing/testing_platform_support.h"
#include "third_party/blink/renderer/platform/testing/unit_test_helpers.h"
@@ -53,15 +52,15 @@ class AudioWorkletThreadTest : public PageTestBase {
Document* document = &GetDocument();
thread->Start(
std::make_unique<GlobalScopeCreationParams>(
- document->Url(), ScriptType::kModule, document->UserAgent(),
- Vector<CSPHeaderAndType>(), document->GetReferrerPolicy(),
- document->GetSecurityOrigin(), document->IsSecureContext(),
- document->GetHttpsState(), nullptr /* worker_clients */,
- document->AddressSpace(),
+ document->Url(), mojom::ScriptType::kModule, document->UserAgent(),
+ nullptr /* web_worker_fetch_context */, Vector<CSPHeaderAndType>(),
+ document->GetReferrerPolicy(), document->GetSecurityOrigin(),
+ document->IsSecureContext(), document->GetHttpsState(),
+ nullptr /* worker_clients */, document->AddressSpace(),
OriginTrialContext::GetTokens(document).get(),
base::UnguessableToken::Create(), nullptr /* worker_settings */,
kV8CacheOptionsDefault, new WorkletModuleResponsesMap),
- base::nullopt, WorkerInspectorProxy::PauseOnWorkerStart::kDontPause,
+ base::nullopt, std::make_unique<WorkerDevToolsParams>(),
ParentExecutionContextTaskRunners::Create());
return thread;
}
@@ -87,8 +86,8 @@ class AudioWorkletThreadTest : public PageTestBase {
KURL js_url("https://example.com/worklet.js");
ScriptModule module = ScriptModule::Compile(
script_state->GetIsolate(), "var counter = 0; ++counter;", js_url,
- js_url, ScriptFetchOptions(), kSharableCrossOrigin,
- TextPosition::MinimumPosition(), ASSERT_NO_EXCEPTION);
+ js_url, ScriptFetchOptions(), TextPosition::MinimumPosition(),
+ ASSERT_NO_EXCEPTION);
EXPECT_FALSE(module.IsNull());
ScriptValue exception = module.Instantiate(script_state);
EXPECT_TRUE(exception.IsEmpty());
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc
index 64d9b51b82e..2102fef2103 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc
@@ -79,7 +79,7 @@ namespace blink {
BaseAudioContext* BaseAudioContext::Create(
Document& document,
- const AudioContextOptions& context_options,
+ const AudioContextOptions* context_options,
ExceptionState& exception_state) {
return AudioContext::Create(document, context_options, exception_state);
}
@@ -205,7 +205,7 @@ void BaseAudioContext::ThrowExceptionForClosedState(
}
AudioBuffer* BaseAudioContext::createBuffer(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
ExceptionState& exception_state) {
// It's ok to call createBuffer, even if the context is closed because the
@@ -225,8 +225,8 @@ AudioBuffer* BaseAudioContext::createBuffer(unsigned number_of_channels,
("WebAudio.AudioBuffer.Length", 1, 1000000, 50));
// The limits are the min and max AudioBuffer sample rates currently
// supported. We use explicit values here instead of
- // AudioUtilities::minAudioBufferSampleRate() and
- // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
+ // audio_utilities::minAudioBufferSampleRate() and
+ // audio_utilities::maxAudioBufferSampleRate(). The number of buckets is
// fairly arbitrary.
DEFINE_STATIC_LOCAL(
CustomCountHistogram, audio_buffer_sample_rate_histogram,
@@ -514,11 +514,11 @@ PeriodicWave* BaseAudioContext::createPeriodicWave(
PeriodicWave* BaseAudioContext::createPeriodicWave(
const Vector<float>& real,
const Vector<float>& imag,
- const PeriodicWaveConstraints& options,
+ const PeriodicWaveConstraints* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
- bool disable = options.disableNormalization();
+ bool disable = options->disableNormalization();
return PeriodicWave::Create(*this, real, imag, disable, exception_state);
}
@@ -605,12 +605,16 @@ void BaseAudioContext::SetContextState(AudioContextState new_state) {
// notification is required when the context gets suspended or closed.
if (was_audible_ && context_state_ != kRunning) {
was_audible_ = false;
- PostCrossThreadTask(
- *task_runner_, FROM_HERE,
- CrossThreadBind(&BaseAudioContext::NotifyAudibleAudioStopped,
- WrapCrossThreadPersistent(this)));
+ GetExecutionContext()
+ ->GetTaskRunner(TaskType::kMediaElementEvent)
+ ->PostTask(FROM_HERE,
+ WTF::Bind(&BaseAudioContext::NotifyAudibleAudioStopped,
+ WrapPersistent(this)));
}
+ if (new_state == kClosed)
+ GetDeferredTaskHandler().StopAcceptingTailProcessing();
+
// Notify context that state changed
if (GetExecutionContext()) {
GetExecutionContext()
@@ -621,7 +625,7 @@ void BaseAudioContext::SetContextState(AudioContextState new_state) {
}
void BaseAudioContext::NotifyStateChange() {
- DispatchEvent(*Event::Create(EventTypeNames::statechange));
+ DispatchEvent(*Event::Create(event_type_names::kStatechange));
}
void BaseAudioContext::NotifySourceNodeFinishedProcessing(
@@ -697,7 +701,7 @@ static bool IsAudible(const AudioBus* rendered_data) {
for (unsigned k = 0; k < rendered_data->NumberOfChannels(); ++k) {
const float* data = rendered_data->Channel(k)->Data();
float channel_energy;
- VectorMath::Vsvesq(data, 1, &channel_energy, data_size);
+ vector_math::Vsvesq(data, 1, &channel_energy, data_size);
energy += channel_energy;
}
@@ -852,7 +856,7 @@ void BaseAudioContext::RejectPendingDecodeAudioDataResolvers() {
decode_audio_resolvers_.clear();
}
-AudioIOPosition BaseAudioContext::OutputPosition() {
+AudioIOPosition BaseAudioContext::OutputPosition() const {
DCHECK(IsMainThread());
GraphAutoLocker locker(this);
return output_position_;
@@ -875,7 +879,7 @@ void BaseAudioContext::RejectPendingResolvers() {
}
const AtomicString& BaseAudioContext::InterfaceName() const {
- return EventTargetNames::AudioContext;
+ return event_target_names::kAudioContext;
}
ExecutionContext* BaseAudioContext::GetExecutionContext() const {
@@ -950,7 +954,7 @@ void BaseAudioContext::UpdateWorkletGlobalScopeOnRenderingThread() {
if (TryLock()) {
if (audio_worklet_thread_) {
AudioWorkletGlobalScope* global_scope =
- ToAudioWorkletGlobalScope(audio_worklet_thread_->GlobalScope());
+ To<AudioWorkletGlobalScope>(audio_worklet_thread_->GlobalScope());
DCHECK(global_scope);
global_scope->SetCurrentFrame(CurrentSampleFrame());
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h
index ae933f20167..d2459fd07be 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h
@@ -105,7 +105,7 @@ class MODULES_EXPORT BaseAudioContext
// Create an AudioContext for rendering to the audio hardware.
static BaseAudioContext* Create(Document&,
- const AudioContextOptions&,
+ const AudioContextOptions*,
ExceptionState&);
~BaseAudioContext() override;
@@ -138,7 +138,7 @@ class MODULES_EXPORT BaseAudioContext
void ThrowExceptionForClosedState(ExceptionState&);
AudioBuffer* createBuffer(unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
ExceptionState&);
@@ -206,7 +206,7 @@ class MODULES_EXPORT BaseAudioContext
ExceptionState&);
PeriodicWave* createPeriodicWave(const Vector<float>& real,
const Vector<float>& imag,
- const PeriodicWaveConstraints&,
+ const PeriodicWaveConstraints*,
ExceptionState&);
// IIRFilter
@@ -261,7 +261,7 @@ class MODULES_EXPORT BaseAudioContext
const AtomicString& InterfaceName() const final;
ExecutionContext* GetExecutionContext() const final;
- DEFINE_ATTRIBUTE_EVENT_LISTENER(statechange);
+ DEFINE_ATTRIBUTE_EVENT_LISTENER(statechange, kStatechange);
void StartRendering();
@@ -296,6 +296,8 @@ class MODULES_EXPORT BaseAudioContext
// Does nothing when the worklet global scope does not exist.
void UpdateWorkletGlobalScopeOnRenderingThread();
+ void set_was_audible_for_testing(bool value) { was_audible_ = value; }
+
protected:
enum ContextType { kRealtimeContext, kOfflineContext };
@@ -322,7 +324,7 @@ class MODULES_EXPORT BaseAudioContext
void RejectPendingDecodeAudioDataResolvers();
- AudioIOPosition OutputPosition();
+ AudioIOPosition OutputPosition() const;
  // Returns the Document with which the instance is associated.
Document* GetDocument() const;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc
index 51afc5bdf50..5b72b8750d7 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc
@@ -48,13 +48,13 @@ static bool hasConstantValues(float* values, int frames_to_process) {
void BiquadDSPKernel::UpdateCoefficientsIfNecessary(int frames_to_process) {
if (GetBiquadProcessor()->FilterCoefficientsDirty()) {
- float cutoff_frequency[AudioUtilities::kRenderQuantumFrames];
- float q[AudioUtilities::kRenderQuantumFrames];
- float gain[AudioUtilities::kRenderQuantumFrames];
- float detune[AudioUtilities::kRenderQuantumFrames]; // in Cents
+ float cutoff_frequency[audio_utilities::kRenderQuantumFrames];
+ float q[audio_utilities::kRenderQuantumFrames];
+ float gain[audio_utilities::kRenderQuantumFrames];
+ float detune[audio_utilities::kRenderQuantumFrames]; // in Cents
SECURITY_CHECK(static_cast<unsigned>(frames_to_process) <=
- AudioUtilities::kRenderQuantumFrames);
+ audio_utilities::kRenderQuantumFrames);
if (GetBiquadProcessor()->HasSampleAccurateValues()) {
GetBiquadProcessor()->Parameter1().CalculateSampleAccurateValues(
@@ -164,7 +164,7 @@ void BiquadDSPKernel::UpdateTailTime(int coef_index) {
void BiquadDSPKernel::Process(const float* source,
float* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(source);
DCHECK(destination);
DCHECK(GetBiquadProcessor());
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h
index 21fe6b0a34e..c8b124a072e 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h
@@ -46,7 +46,7 @@ class BiquadDSPKernel final : public AudioDSPKernel {
// AudioDSPKernel
void Process(const float* source,
float* dest,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
void Reset() override { biquad_.Reset(); }
// Get the magnitude and phase response of the filter at the given
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc
index a418bb59127..07cb774c2d2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc
@@ -112,7 +112,7 @@ BiquadFilterNode* BiquadFilterNode::Create(BaseAudioContext& context,
}
BiquadFilterNode* BiquadFilterNode::Create(BaseAudioContext* context,
- const BiquadFilterOptions& options,
+ const BiquadFilterOptions* options,
ExceptionState& exception_state) {
BiquadFilterNode* node = Create(*context, exception_state);
@@ -121,11 +121,11 @@ BiquadFilterNode* BiquadFilterNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- node->setType(options.type());
- node->q()->setValue(options.Q());
- node->detune()->setValue(options.detune());
- node->frequency()->setValue(options.frequency());
- node->gain()->setValue(options.gain());
+ node->setType(options->type());
+ node->q()->setValue(options->Q());
+ node->detune()->setValue(options->detune());
+ node->frequency()->setValue(options->frequency());
+ node->gain()->setValue(options->gain());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h
index 25be501d2a9..e4a7b177333 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h
@@ -75,7 +75,7 @@ class BiquadFilterNode final : public AudioNode {
static BiquadFilterNode* Create(BaseAudioContext&, ExceptionState&);
static BiquadFilterNode* Create(BaseAudioContext*,
- const BiquadFilterOptions&,
+ const BiquadFilterOptions*,
ExceptionState&);
void Trace(blink::Visitor*) override;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc
index 5b80b30be8b..9bc6a40bdd4 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc
@@ -31,7 +31,7 @@
namespace blink {
BiquadProcessor::BiquadProcessor(float sample_rate,
- size_t number_of_channels,
+ uint32_t number_of_channels,
AudioParamHandler& frequency,
AudioParamHandler& q,
AudioParamHandler& gain,
@@ -104,7 +104,7 @@ void BiquadProcessor::CheckForDirtyCoefficients() {
void BiquadProcessor::Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
if (!IsInitialized()) {
destination->Zero();
return;
@@ -128,10 +128,10 @@ void BiquadProcessor::Process(const AudioBus* source,
frames_to_process);
}
-void BiquadProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+void BiquadProcessor::ProcessOnlyAudioParams(uint32_t frames_to_process) {
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
- float values[AudioUtilities::kRenderQuantumFrames];
+ float values[audio_utilities::kRenderQuantumFrames];
parameter1_->CalculateSampleAccurateValues(values, frames_to_process);
parameter2_->CalculateSampleAccurateValues(values, frames_to_process);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h
index 17ea56bed34..954ac2cc33e 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h
@@ -54,7 +54,7 @@ class BiquadProcessor final : public AudioDSPKernelProcessor {
};
BiquadProcessor(float sample_rate,
- size_t number_of_channels,
+ uint32_t number_of_channels,
AudioParamHandler& frequency,
AudioParamHandler& q,
AudioParamHandler& gain,
@@ -65,9 +65,9 @@ class BiquadProcessor final : public AudioDSPKernelProcessor {
void Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc
index e6c2b074ba2..a1d325c8c4e 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc
@@ -64,7 +64,7 @@ scoped_refptr<ChannelMergerHandler> ChannelMergerHandler::Create(
new ChannelMergerHandler(node, sample_rate, number_of_inputs));
}
-void ChannelMergerHandler::Process(size_t frames_to_process) {
+void ChannelMergerHandler::Process(uint32_t frames_to_process) {
AudioNodeOutput& output = this->Output(0);
DCHECK_EQ(frames_to_process, output.Bus()->length());
@@ -94,7 +94,7 @@ void ChannelMergerHandler::Process(size_t frames_to_process) {
}
}
-void ChannelMergerHandler::SetChannelCount(unsigned long channel_count,
+void ChannelMergerHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -165,10 +165,10 @@ ChannelMergerNode* ChannelMergerNode::Create(BaseAudioContext& context,
ChannelMergerNode* ChannelMergerNode::Create(
BaseAudioContext* context,
- const ChannelMergerOptions& options,
+ const ChannelMergerOptions* options,
ExceptionState& exception_state) {
ChannelMergerNode* node =
- Create(*context, options.numberOfInputs(), exception_state);
+ Create(*context, options->numberOfInputs(), exception_state);
if (!node)
return nullptr;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h
index 7ab089c8657..eb92527a920 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h
@@ -43,8 +43,8 @@ class ChannelMergerHandler final : public AudioHandler {
float sample_rate,
unsigned number_of_inputs);
- void Process(size_t frames_to_process) override;
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void Process(uint32_t frames_to_process) override;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
double TailTime() const override { return 0; }
@@ -66,7 +66,7 @@ class ChannelMergerNode final : public AudioNode {
unsigned number_of_inputs,
ExceptionState&);
static ChannelMergerNode* Create(BaseAudioContext*,
- const ChannelMergerOptions&,
+ const ChannelMergerOptions*,
ExceptionState&);
private:
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc
index 1b87eb74314..6471c29edf2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc
@@ -60,7 +60,7 @@ scoped_refptr<ChannelSplitterHandler> ChannelSplitterHandler::Create(
new ChannelSplitterHandler(node, sample_rate, number_of_outputs));
}
-void ChannelSplitterHandler::Process(size_t frames_to_process) {
+void ChannelSplitterHandler::Process(uint32_t frames_to_process) {
AudioBus* source = Input(0).Bus();
DCHECK(source);
DCHECK_EQ(frames_to_process, source->length());
@@ -83,7 +83,7 @@ void ChannelSplitterHandler::Process(size_t frames_to_process) {
}
}
-void ChannelSplitterHandler::SetChannelCount(unsigned long channel_count,
+void ChannelSplitterHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -171,10 +171,10 @@ ChannelSplitterNode* ChannelSplitterNode::Create(
ChannelSplitterNode* ChannelSplitterNode::Create(
BaseAudioContext* context,
- const ChannelSplitterOptions& options,
+ const ChannelSplitterOptions* options,
ExceptionState& exception_state) {
ChannelSplitterNode* node =
- Create(*context, options.numberOfOutputs(), exception_state);
+ Create(*context, options->numberOfOutputs(), exception_state);
if (!node)
return nullptr;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h
index 31e2e9af892..155c8f17014 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h
@@ -40,8 +40,8 @@ class ChannelSplitterHandler final : public AudioHandler {
Create(AudioNode&, float sample_rate, unsigned number_of_outputs);
// AudioHandler
- void Process(size_t frames_to_process) override;
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void Process(uint32_t frames_to_process) override;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
void SetChannelInterpretation(const String&, ExceptionState&) final;
@@ -64,7 +64,7 @@ class ChannelSplitterNode final : public AudioNode {
unsigned number_of_outputs,
ExceptionState&);
static ChannelSplitterNode* Create(BaseAudioContext*,
- const ChannelSplitterOptions&,
+ const ChannelSplitterOptions*,
ExceptionState&);
private:
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc
index 21349c352e8..b56415124b2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc
@@ -20,7 +20,7 @@ ConstantSourceHandler::ConstantSourceHandler(AudioNode& node,
AudioParamHandler& offset)
: AudioScheduledSourceHandler(kNodeTypeConstantSource, node, sample_rate),
offset_(&offset),
- sample_accurate_values_(AudioUtilities::kRenderQuantumFrames) {
+ sample_accurate_values_(audio_utilities::kRenderQuantumFrames) {
// A ConstantSource is always mono.
AddOutput(1);
@@ -38,7 +38,7 @@ ConstantSourceHandler::~ConstantSourceHandler() {
Uninitialize();
}
-void ConstantSourceHandler::Process(size_t frames_to_process) {
+void ConstantSourceHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
DCHECK(output_bus);
@@ -132,7 +132,7 @@ ConstantSourceNode* ConstantSourceNode::Create(
ConstantSourceNode* ConstantSourceNode::Create(
BaseAudioContext* context,
- const ConstantSourceOptions& options,
+ const ConstantSourceOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -141,7 +141,7 @@ ConstantSourceNode* ConstantSourceNode::Create(
if (!node)
return nullptr;
- node->offset()->setValue(options.offset());
+ node->offset()->setValue(options->offset());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h
index cc4f97adc33..4885e2d78e7 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h
@@ -27,7 +27,7 @@ class ConstantSourceHandler final : public AudioScheduledSourceHandler {
~ConstantSourceHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
private:
ConstantSourceHandler(AudioNode&,
@@ -47,7 +47,7 @@ class ConstantSourceNode final : public AudioScheduledSourceNode {
public:
static ConstantSourceNode* Create(BaseAudioContext&, ExceptionState&);
static ConstantSourceNode* Create(BaseAudioContext*,
- const ConstantSourceOptions&,
+ const ConstantSourceOptions*,
ExceptionState&);
void Trace(blink::Visitor*) override;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc
index 3e6b0b4f240..4886bab5e51 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc
@@ -65,7 +65,7 @@ ConvolverHandler::~ConvolverHandler() {
Uninitialize();
}
-void ConvolverHandler::Process(size_t frames_to_process) {
+void ConvolverHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
DCHECK(output_bus);
@@ -140,7 +140,7 @@ void ConvolverHandler::SetBuffer(AudioBuffer* buffer,
// Create the reverb with the given impulse response.
std::unique_ptr<Reverb> reverb = std::make_unique<Reverb>(
- buffer_bus.get(), AudioUtilities::kRenderQuantumFrames, MaxFFTSize,
+ buffer_bus.get(), audio_utilities::kRenderQuantumFrames, MaxFFTSize,
Context() && Context()->HasRealtimeConstraint(), normalize_);
{
@@ -202,7 +202,7 @@ unsigned ConvolverHandler::ComputeNumberOfOutputChannels(
return clampTo(std::max(input_channels, response_channels), 1, 2);
}
-void ConvolverHandler::SetChannelCount(unsigned long channel_count,
+void ConvolverHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -277,7 +277,7 @@ ConvolverNode* ConvolverNode::Create(BaseAudioContext& context,
}
ConvolverNode* ConvolverNode::Create(BaseAudioContext* context,
- const ConvolverOptions& options,
+ const ConvolverOptions* options,
ExceptionState& exception_state) {
ConvolverNode* node = Create(*context, exception_state);
@@ -288,9 +288,9 @@ ConvolverNode* ConvolverNode::Create(BaseAudioContext* context,
// It is important to set normalize first because setting the buffer will
  // examine the normalize attribute to see if normalization needs to be done.
- node->setNormalize(!options.disableNormalization());
- if (options.hasBuffer())
- node->setBuffer(options.buffer(), exception_state);
+ node->setNormalize(!options->disableNormalization());
+ if (options->hasBuffer())
+ node->setBuffer(options->buffer(), exception_state);
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h
index fc11d61ce49..9b28e5efd01 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h
@@ -47,7 +47,7 @@ class MODULES_EXPORT ConvolverHandler final : public AudioHandler {
~ConvolverHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
// Called in the main thread when the number of channels for the input may
// have changed.
void CheckNumberOfChannelsForInput(AudioNodeInput*) override;
@@ -58,7 +58,7 @@ class MODULES_EXPORT ConvolverHandler final : public AudioHandler {
bool Normalize() const { return normalize_; }
void SetNormalize(bool normalize) { normalize_ = normalize; }
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
private:
@@ -95,7 +95,7 @@ class MODULES_EXPORT ConvolverNode final : public AudioNode {
public:
static ConvolverNode* Create(BaseAudioContext&, ExceptionState&);
static ConvolverNode* Create(BaseAudioContext*,
- const ConvolverOptions&,
+ const ConvolverOptions*,
ExceptionState&);
AudioBuffer* buffer() const;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc
index 964e68ea31a..9d8b35737b1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc
@@ -35,7 +35,6 @@
#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
#include "third_party/blink/renderer/platform/audio/denormal_disabler.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
-#include "third_party/blink/renderer/platform/wtf/atomics.h"
namespace blink {
@@ -86,7 +85,7 @@ void DefaultAudioDestinationHandler::Uninitialize() {
}
void DefaultAudioDestinationHandler::SetChannelCount(
- unsigned long channel_count,
+ unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -134,7 +133,7 @@ void DefaultAudioDestinationHandler::RestartRendering() {
StartRendering();
}
-unsigned long DefaultAudioDestinationHandler::MaxChannelCount() const {
+uint32_t DefaultAudioDestinationHandler::MaxChannelCount() const {
return AudioDestination::MaxChannelCount();
}
@@ -147,7 +146,7 @@ double DefaultAudioDestinationHandler::SampleRate() const {
void DefaultAudioDestinationHandler::Render(
AudioBus* destination_bus,
- size_t number_of_frames,
+ uint32_t number_of_frames,
const AudioIOPosition& output_position) {
TRACE_EVENT0("webaudio", "DefaultAudioDestinationHandler::Render");
@@ -198,8 +197,7 @@ void DefaultAudioDestinationHandler::Render(
Context()->HandlePostRenderTasks(destination_bus);
// Advances the current sample-frame.
- size_t new_sample_frame = current_sample_frame_ + number_of_frames;
- ReleaseStore(&current_sample_frame_, new_sample_frame);
+ AdvanceCurrentSampleFrame(number_of_frames);
Context()->UpdateWorkletGlobalScopeOnRenderingThread();
}
@@ -259,7 +257,8 @@ DefaultAudioDestinationNode::DefaultAudioDestinationNode(
DefaultAudioDestinationNode* DefaultAudioDestinationNode::Create(
BaseAudioContext* context,
const WebAudioLatencyHint& latency_hint) {
- return new DefaultAudioDestinationNode(*context, latency_hint);
+ return MakeGarbageCollected<DefaultAudioDestinationNode>(*context,
+ latency_hint);
}
} // namespace blink
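Factory methods here and in later hunks switch from 'new T(...)' to MakeGarbageCollected<T>(...), which constructs the object on the Oilpan heap and therefore needs a reachable constructor; that is why several constructors move from the private section to public in the headers below. A hedged sketch of the pattern with a hypothetical node class:

// Sketch only: Oilpan-managed construction via MakeGarbageCollected.
class ExampleNode final : public AudioNode {
 public:
  static ExampleNode* Create(BaseAudioContext& context) {
    return MakeGarbageCollected<ExampleNode>(context);
  }
  // Public so MakeGarbageCollected can invoke it.
  explicit ExampleNode(BaseAudioContext& context) : AudioNode(context) {}
};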
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h
index 0507a5a85ff..1451f6cddc5 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h
@@ -50,7 +50,7 @@ class DefaultAudioDestinationHandler final : public AudioDestinationHandler,
void Dispose() override;
void Initialize() override;
void Uninitialize() override;
- void SetChannelCount(unsigned long, ExceptionState&) override;
+ void SetChannelCount(unsigned, ExceptionState&) override;
double LatencyTime() const override { return 0; }
double TailTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
@@ -59,14 +59,14 @@ class DefaultAudioDestinationHandler final : public AudioDestinationHandler,
void StartRendering() override;
void StopRendering() override;
void RestartRendering() override;
- unsigned long MaxChannelCount() const override;
+ uint32_t MaxChannelCount() const override;
double SampleRate() const override;
// For AudioIOCallback. This is invoked by the platform audio destination to
// get the next render quantum into |destination_bus| and update
// |output_position|.
void Render(AudioBus* destination_bus,
- size_t number_of_frames,
+ uint32_t number_of_frames,
const AudioIOPosition& output_position) final;
  // Returns a hardware callback buffer size from audio infra.
@@ -96,7 +96,6 @@ class DefaultAudioDestinationNode final : public AudioDestinationNode {
static DefaultAudioDestinationNode* Create(BaseAudioContext*,
const WebAudioLatencyHint&);
- private:
explicit DefaultAudioDestinationNode(BaseAudioContext&,
const WebAudioLatencyHint&);
};
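The frame-count parameters change from size_t to uint32_t across the virtual interfaces (Process, Render, ProcessOnlyAudioParams), and every override has to change in lockstep. A self-contained sketch, not Blink code, of why the 'override' keyword makes that migration safe:

// Sketch only: with 'override', a leftover size_t parameter fails to compile
// instead of silently declaring an unrelated overload.
struct RenderCallback {
  virtual ~RenderCallback() = default;
  virtual void Render(uint32_t number_of_frames) = 0;
};

struct MyCallback final : RenderCallback {
  void Render(uint32_t number_of_frames) override {
    // ... render |number_of_frames| frames ...
  }
};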
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc
index 073fbc01c0f..9d409388f14 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc
@@ -30,8 +30,9 @@
#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cancellable_task.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
-#include "third_party/blink/renderer/platform/web_task_runner.h"
namespace blink {
@@ -137,12 +138,12 @@ void DeferredTaskHandler::AddAutomaticPullNode(
}
}
-void DeferredTaskHandler::RemoveAutomaticPullNode(
- scoped_refptr<AudioHandler> node) {
+void DeferredTaskHandler::RemoveAutomaticPullNode(AudioHandler* node) {
AssertGraphOwner();
- if (automatic_pull_handlers_.Contains(node)) {
- automatic_pull_handlers_.erase(node);
+ auto it = automatic_pull_handlers_.find(node);
+ if (it != automatic_pull_handlers_.end()) {
+ automatic_pull_handlers_.erase(it);
automatic_pull_handlers_need_updating_ = true;
}
}
@@ -156,7 +157,8 @@ void DeferredTaskHandler::UpdateAutomaticPullNodes() {
}
}
-void DeferredTaskHandler::ProcessAutomaticPullNodes(size_t frames_to_process) {
+void DeferredTaskHandler::ProcessAutomaticPullNodes(
+ uint32_t frames_to_process) {
DCHECK(IsAudioThread());
for (unsigned i = 0; i < rendering_automatic_pull_handlers_.size(); ++i) {
@@ -167,6 +169,7 @@ void DeferredTaskHandler::ProcessAutomaticPullNodes(size_t frames_to_process) {
void DeferredTaskHandler::AddTailProcessingHandler(
scoped_refptr<AudioHandler> handler) {
+ DCHECK(accepts_tail_processing_);
AssertGraphOwner();
if (!tail_processing_handlers_.Contains(handler)) {
@@ -177,12 +180,11 @@ void DeferredTaskHandler::AddTailProcessingHandler(
}
}
-void DeferredTaskHandler::RemoveTailProcessingHandler(
- scoped_refptr<AudioHandler> handler,
- bool disable_outputs) {
+void DeferredTaskHandler::RemoveTailProcessingHandler(AudioHandler* handler,
+ bool disable_outputs) {
AssertGraphOwner();
- size_t index = tail_processing_handlers_.Find(handler);
+ wtf_size_t index = tail_processing_handlers_.Find(handler);
if (index != kNotFound) {
#if DEBUG_AUDIONODE_REFERENCES > 1
handler->RemoveTailProcessingDebug(disable_outputs);
@@ -191,7 +193,8 @@ void DeferredTaskHandler::RemoveTailProcessingHandler(
if (disable_outputs) {
// Disabling of outputs should happen on the main thread so save this
// handler so it can be processed there.
- finished_tail_processing_handlers_.push_back(handler);
+ finished_tail_processing_handlers_.push_back(
+ std::move(tail_processing_handlers_[index]));
}
tail_processing_handlers_.EraseAt(index);
@@ -223,7 +226,7 @@ void DeferredTaskHandler::UpdateTailProcessingHandlers() {
handler->Context()->currentTime(), handler->TailTime(),
handler->LatencyTime());
#endif
- RemoveTailProcessingHandler(handler, true);
+ RemoveTailProcessingHandler(handler.get(), true);
}
}
}
@@ -296,7 +299,8 @@ void DeferredTaskHandler::ContextWillBeDestroyed() {
// Some handlers might live because of their cross thread tasks.
}
-DeferredTaskHandler::GraphAutoLocker::GraphAutoLocker(BaseAudioContext* context)
+DeferredTaskHandler::GraphAutoLocker::GraphAutoLocker(
+ const BaseAudioContext* context)
: handler_(context->GetDeferredTaskHandler()) {
handler_.lock();
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h
index aa1752797db..cc72c0b87aa 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h
@@ -75,11 +75,11 @@ class MODULES_EXPORT DeferredTaskHandler final
// called by the nodes who want to add/remove themselves into/from the
// automatic pull lists.
void AddAutomaticPullNode(scoped_refptr<AudioHandler>);
- void RemoveAutomaticPullNode(scoped_refptr<AudioHandler>);
+ void RemoveAutomaticPullNode(AudioHandler*);
// Called right before handlePostRenderTasks() to handle nodes which need to
// be pulled even when they are not connected to anything.
- void ProcessAutomaticPullNodes(size_t frames_to_process);
+ void ProcessAutomaticPullNodes(uint32_t frames_to_process);
  // Keep track of AudioNodes that have their channel count mode changed. We
// process the changes in the post rendering phase.
@@ -109,6 +109,9 @@ class MODULES_EXPORT DeferredTaskHandler final
void RequestToDeleteHandlersOnMainThread();
void ClearHandlersToBeDeleted();
+ bool AcceptsTailProcessing() const { return accepts_tail_processing_; }
+ void StopAcceptingTailProcessing() { accepts_tail_processing_ = false; }
+
// If |node| requires tail processing, add it to the list of tail
// nodes so the tail is processed.
void AddTailProcessingHandler(scoped_refptr<AudioHandler>);
@@ -117,8 +120,7 @@ class MODULES_EXPORT DeferredTaskHandler final
// complete). Set |disable_outputs| to true if the outputs of the handler
// should also be disabled. This should be true if the tail is done. But if
// we're reconnected or re-enabled, then |disable_outputs| should be false.
- void RemoveTailProcessingHandler(scoped_refptr<AudioHandler> node,
- bool disable_outputs);
+ void RemoveTailProcessingHandler(AudioHandler*, bool disable_outputs);
// Remove all tail processing nodes. Should be called only when the
// context is done.
@@ -161,7 +163,7 @@ class MODULES_EXPORT DeferredTaskHandler final
explicit GraphAutoLocker(DeferredTaskHandler& handler) : handler_(handler) {
handler_.lock();
}
- explicit GraphAutoLocker(BaseAudioContext*);
+ explicit GraphAutoLocker(const BaseAudioContext*);
~GraphAutoLocker() { handler_.unlock(); }
@@ -229,11 +231,16 @@ class MODULES_EXPORT DeferredTaskHandler final
  // Nodes that are processing their tails.
Vector<scoped_refptr<AudioHandler>> tail_processing_handlers_;
+
// Tail processing nodes that are now finished and want the output to be
// disabled. This is updated in the audio thread (with the graph lock). The
// main thread will disable the outputs.
Vector<scoped_refptr<AudioHandler>> finished_tail_processing_handlers_;
+ // Once the associated context closes, new tail processing handlers are not
+ // accepted.
+ bool accepts_tail_processing_ = true;
+
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Graph locking.
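The new accepts_tail_processing_ flag works together with the SetContextState() hunk in base_audio_context.cc: once the context reaches the closed state, StopAcceptingTailProcessing() clears the flag, and AddTailProcessingHandler() DCHECKs that it is still set. A hedged sketch of the two sides of that guard, using hypothetical free functions and only methods that appear in the hunks:

// Sketch only: ordering between closing the context and queuing tails.
void OnContextClosed(DeferredTaskHandler& handler) {
  // Called from BaseAudioContext::SetContextState(kClosed) per the hunk above.
  handler.StopAcceptingTailProcessing();
}

bool CanQueueTail(const DeferredTaskHandler& handler) {
  // AddTailProcessingHandler() DCHECKs this before queuing a handler.
  return handler.AcceptsTailProcessing();
}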
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc
index 0d50d42071d..946858750b1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc
@@ -31,7 +31,7 @@
namespace blink {
DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
- : AudioDelayDSPKernel(processor, AudioUtilities::kRenderQuantumFrames) {
+ : AudioDelayDSPKernel(processor, audio_utilities::kRenderQuantumFrames) {
DCHECK(processor);
DCHECK_GT(processor->SampleRate(), 0);
if (!(processor && processor->SampleRate() > 0))
@@ -53,7 +53,7 @@ bool DelayDSPKernel::HasSampleAccurateValues() {
}
void DelayDSPKernel::CalculateSampleAccurateValues(float* delay_times,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
GetDelayProcessor()->DelayTime().CalculateSampleAccurateValues(
delay_times, frames_to_process);
}
@@ -62,10 +62,10 @@ double DelayDSPKernel::DelayTime(float) {
return GetDelayProcessor()->DelayTime().FinalValue();
}
-void DelayDSPKernel::ProcessOnlyAudioParams(size_t frames_to_process) {
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+void DelayDSPKernel::ProcessOnlyAudioParams(uint32_t frames_to_process) {
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
- float values[AudioUtilities::kRenderQuantumFrames];
+ float values[audio_utilities::kRenderQuantumFrames];
GetDelayProcessor()->DelayTime().CalculateSampleAccurateValues(
values, frames_to_process);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h
index 8ba48b472d5..e4d0529da20 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h
@@ -40,10 +40,10 @@ class DelayDSPKernel final : public AudioDelayDSPKernel {
protected:
bool HasSampleAccurateValues() override;
void CalculateSampleAccurateValues(float* delay_times,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
double DelayTime(float sample_rate) override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
private:
DelayProcessor* GetDelayProcessor() {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc
index cc527506953..e06c962d79a 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc
@@ -104,21 +104,21 @@ DelayNode* DelayNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new DelayNode(context, max_delay_time);
+ return MakeGarbageCollected<DelayNode>(context, max_delay_time);
}
DelayNode* DelayNode::Create(BaseAudioContext* context,
- const DelayOptions& options,
+ const DelayOptions* options,
ExceptionState& exception_state) {
// maxDelayTime has a default value specified.
- DelayNode* node = Create(*context, options.maxDelayTime(), exception_state);
+ DelayNode* node = Create(*context, options->maxDelayTime(), exception_state);
if (!node)
return nullptr;
node->HandleChannelOptions(options, exception_state);
- node->delayTime()->setValue(options.delayTime());
+ node->delayTime()->setValue(options->delayTime());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h
index 37a5a200a39..72f6ff716b3 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h
@@ -59,14 +59,15 @@ class DelayNode final : public AudioNode {
double max_delay_time,
ExceptionState&);
static DelayNode* Create(BaseAudioContext*,
- const DelayOptions&,
+ const DelayOptions*,
ExceptionState&);
+
+ DelayNode(BaseAudioContext&, double max_delay_time);
+
void Trace(blink::Visitor*) override;
AudioParam* delayTime();
private:
- DelayNode(BaseAudioContext&, double max_delay_time);
-
Member<AudioParam> delay_time_;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc
index 491c68315ea..b1c3580aa9c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc
@@ -47,10 +47,10 @@ std::unique_ptr<AudioDSPKernel> DelayProcessor::CreateKernel() {
return std::make_unique<DelayDSPKernel>(this);
}
-void DelayProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+void DelayProcessor::ProcessOnlyAudioParams(uint32_t frames_to_process) {
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
- float values[AudioUtilities::kRenderQuantumFrames];
+ float values[audio_utilities::kRenderQuantumFrames];
delay_time_->CalculateSampleAccurateValues(values, frames_to_process);
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h
index e4ab174a13e..bb19b9f5e61 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h
@@ -45,7 +45,7 @@ class DelayProcessor final : public AudioDSPKernelProcessor {
std::unique_ptr<AudioDSPKernel> CreateKernel() override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
AudioParamHandler& DelayTime() const { return *delay_time_; }
double MaxDelayTime() { return max_delay_time_; }
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc
index 1fd4e451a0d..099e5ce64be 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc
@@ -77,7 +77,7 @@ DynamicsCompressorHandler::~DynamicsCompressorHandler() {
Uninitialize();
}
-void DynamicsCompressorHandler::Process(size_t frames_to_process) {
+void DynamicsCompressorHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
DCHECK(output_bus);
@@ -105,11 +105,11 @@ void DynamicsCompressorHandler::Process(size_t frames_to_process) {
}
void DynamicsCompressorHandler::ProcessOnlyAudioParams(
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
- float values[AudioUtilities::kRenderQuantumFrames];
+ float values[audio_utilities::kRenderQuantumFrames];
threshold_->CalculateSampleAccurateValues(values, frames_to_process);
knee_->CalculateSampleAccurateValues(values, frames_to_process);
@@ -141,7 +141,7 @@ double DynamicsCompressorHandler::LatencyTime() const {
}
void DynamicsCompressorHandler::SetChannelCount(
- unsigned long channel_count,
+ unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -245,12 +245,12 @@ DynamicsCompressorNode* DynamicsCompressorNode::Create(
return nullptr;
}
- return new DynamicsCompressorNode(context);
+ return MakeGarbageCollected<DynamicsCompressorNode>(context);
}
DynamicsCompressorNode* DynamicsCompressorNode::Create(
BaseAudioContext* context,
- const DynamicsCompressorOptions& options,
+ const DynamicsCompressorOptions* options,
ExceptionState& exception_state) {
DynamicsCompressorNode* node = Create(*context, exception_state);
@@ -259,11 +259,11 @@ DynamicsCompressorNode* DynamicsCompressorNode::Create(
node->HandleChannelOptions(options, exception_state);
- node->attack()->setValue(options.attack());
- node->knee()->setValue(options.knee());
- node->ratio()->setValue(options.ratio());
- node->release()->setValue(options.release());
- node->threshold()->setValue(options.threshold());
+ node->attack()->setValue(options->attack());
+ node->knee()->setValue(options->knee());
+ node->ratio()->setValue(options->ratio());
+ node->release()->setValue(options->release());
+ node->threshold()->setValue(options->threshold());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h
index 1479b9271c2..a4f681a96c4 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h
@@ -52,13 +52,13 @@ class MODULES_EXPORT DynamicsCompressorHandler final : public AudioHandler {
~DynamicsCompressorHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
void Initialize() override;
float ReductionValue() const { return NoBarrierLoad(&reduction_); }
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
private:
@@ -90,8 +90,11 @@ class MODULES_EXPORT DynamicsCompressorNode final : public AudioNode {
public:
static DynamicsCompressorNode* Create(BaseAudioContext&, ExceptionState&);
static DynamicsCompressorNode* Create(BaseAudioContext*,
- const DynamicsCompressorOptions&,
+ const DynamicsCompressorOptions*,
ExceptionState&);
+
+ DynamicsCompressorNode(BaseAudioContext&);
+
void Trace(blink::Visitor*) override;
AudioParam* threshold() const;
@@ -102,7 +105,6 @@ class MODULES_EXPORT DynamicsCompressorNode final : public AudioNode {
AudioParam* release() const;
private:
- DynamicsCompressorNode(BaseAudioContext&);
DynamicsCompressorHandler& GetDynamicsCompressorHandler() const;
Member<AudioParam> threshold_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc
index b2e8c6328b6..dfa6dfb85c5 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc
@@ -38,9 +38,9 @@ GainHandler::GainHandler(AudioNode& node,
: AudioHandler(kNodeTypeGain, node, sample_rate),
gain_(&gain),
sample_accurate_gain_values_(
- AudioUtilities::kRenderQuantumFrames) // FIXME: can probably
- // share temp buffer
- // in context
+ audio_utilities::kRenderQuantumFrames) // FIXME: can probably
+ // share temp buffer
+ // in context
{
AddInput();
AddOutput(1);
@@ -54,7 +54,7 @@ scoped_refptr<GainHandler> GainHandler::Create(AudioNode& node,
return base::AdoptRef(new GainHandler(node, sample_rate, gain));
}
-void GainHandler::Process(size_t frames_to_process) {
+void GainHandler::Process(uint32_t frames_to_process) {
// FIXME: for some cases there is a nice optimization to avoid processing
// here, and let the gain change happen in the summing junction input of the
// AudioNode we're connected to. Then we can avoid all of the following:
@@ -89,11 +89,11 @@ void GainHandler::Process(size_t frames_to_process) {
}
}
-void GainHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
+void GainHandler::ProcessOnlyAudioParams(uint32_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
- float values[AudioUtilities::kRenderQuantumFrames];
+ float values[audio_utilities::kRenderQuantumFrames];
gain_->CalculateSampleAccurateValues(values, frames_to_process);
}
@@ -154,11 +154,11 @@ GainNode* GainNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new GainNode(context);
+ return MakeGarbageCollected<GainNode>(context);
}
GainNode* GainNode::Create(BaseAudioContext* context,
- const GainOptions& options,
+ const GainOptions* options,
ExceptionState& exception_state) {
GainNode* node = Create(*context, exception_state);
@@ -167,7 +167,7 @@ GainNode* GainNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- node->gain()->setValue(options.gain());
+ node->gain()->setValue(options->gain());
return node;
}
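
GainHandler::ProcessOnlyAudioParams above relies on the render quantum being a small compile-time constant: the scratch buffer for sample-accurate gain values lives on the stack and is guarded by a DCHECK against kRenderQuantumFrames. A self-contained sketch of that pattern follows, assuming the usual 128-frame render quantum and a stand-in for CalculateSampleAccurateValues.

#include <cassert>
#include <cstdint>

constexpr uint32_t kRenderQuantumFrames = 128;  // assumed render quantum size

// Stand-in for AudioParamHandler::CalculateSampleAccurateValues(): writes one
// value per frame (here just a constant gain curve).
void CalculateSampleAccurateValues(float* values, uint32_t frames) {
  for (uint32_t i = 0; i < frames; ++i)
    values[i] = 1.0f;
}

void ProcessOnlyAudioParamsSketch(uint32_t frames_to_process) {
  assert(frames_to_process <= kRenderQuantumFrames);
  // Stack buffer sized to the render quantum: no heap allocation on the
  // audio thread.
  float values[kRenderQuantumFrames];
  CalculateSampleAccurateValues(values, frames_to_process);
}

int main() {
  ProcessOnlyAudioParamsSketch(kRenderQuantumFrames);
  return 0;
}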
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h
index 2837a2a9930..3e2b34481a1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h
@@ -47,8 +47,8 @@ class GainHandler final : public AudioHandler {
AudioParamHandler& gain);
// AudioHandler
- void Process(size_t frames_to_process) override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
// Called in the main thread when the number of channels for the input may
// have changed.
@@ -73,15 +73,16 @@ class GainNode final : public AudioNode {
public:
static GainNode* Create(BaseAudioContext&, ExceptionState&);
static GainNode* Create(BaseAudioContext*,
- const GainOptions&,
+ const GainOptions*,
ExceptionState&);
+
+ GainNode(BaseAudioContext&);
+
void Trace(blink::Visitor*) override;
AudioParam* gain() const;
private:
- GainNode(BaseAudioContext&);
-
Member<AudioParam> gain_;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc
index 96e2aa951bf..d97c8606ef3 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc
@@ -145,7 +145,7 @@ IIRFilterNode* IIRFilterNode::Create(BaseAudioContext& context,
bool has_non_zero_coef = false;
- for (size_t k = 0; k < feedforward_coef.size(); ++k) {
+ for (wtf_size_t k = 0; k < feedforward_coef.size(); ++k) {
if (feedforward_coef[k] != 0) {
has_non_zero_coef = true;
break;
@@ -164,7 +164,7 @@ IIRFilterNode* IIRFilterNode::Create(BaseAudioContext& context,
StringBuilder message;
message.Append("Unstable IIRFilter with feedback coefficients: [");
message.AppendNumber(feedback_coef[0]);
- for (size_t k = 1; k < feedback_coef.size(); ++k) {
+ for (wtf_size_t k = 1; k < feedback_coef.size(); ++k) {
message.Append(", ");
message.AppendNumber(feedback_coef[k]);
}
@@ -174,15 +174,15 @@ IIRFilterNode* IIRFilterNode::Create(BaseAudioContext& context,
kJSMessageSource, kWarningMessageLevel, message.ToString()));
}
- return new IIRFilterNode(context, feedforward_coef, feedback_coef,
- is_filter_stable);
+ return MakeGarbageCollected<IIRFilterNode>(context, feedforward_coef,
+ feedback_coef, is_filter_stable);
}
IIRFilterNode* IIRFilterNode::Create(BaseAudioContext* context,
- const IIRFilterOptions& options,
+ const IIRFilterOptions* options,
ExceptionState& exception_state) {
- IIRFilterNode* node = Create(*context, options.feedforward(),
- options.feedback(), exception_state);
+ IIRFilterNode* node = Create(*context, options->feedforward(),
+ options->feedback(), exception_state);
if (!node)
return nullptr;
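
The loops that now use wtf_size_t implement a simple validity check: an IIRFilterNode is rejected when every feedforward coefficient is zero, and a console warning is emitted when the feedback coefficients describe an unstable filter. A self-contained sketch of the all-zero check, using std::vector in place of WTF::Vector:

#include <cstddef>
#include <iostream>
#include <vector>

// Returns true if at least one coefficient is non-zero.
bool HasNonZeroCoefficient(const std::vector<double>& coef) {
  for (std::size_t k = 0; k < coef.size(); ++k) {
    if (coef[k] != 0)
      return true;
  }
  return false;
}

int main() {
  std::vector<double> feedforward = {0.0, 0.0, 0.0};
  if (!HasNonZeroCoefficient(feedforward))
    std::cout << "feedforward coefficients must not all be zero\n";
  return 0;
}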
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h
index 081987c0b42..a33d99c249a 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h
@@ -44,9 +44,14 @@ class IIRFilterNode : public AudioNode {
ExceptionState&);
static IIRFilterNode* Create(BaseAudioContext*,
- const IIRFilterOptions&,
+ const IIRFilterOptions*,
ExceptionState&);
+ IIRFilterNode(BaseAudioContext&,
+ const Vector<double>& denominator,
+ const Vector<double>& numerator,
+ bool is_filter_stable);
+
void Trace(blink::Visitor*) override;
// Get the magnitude and phase response of the filter at the given
@@ -57,11 +62,6 @@ class IIRFilterNode : public AudioNode {
ExceptionState&);
private:
- IIRFilterNode(BaseAudioContext&,
- const Vector<double>& denominator,
- const Vector<double>& numerator,
- bool is_filter_stable);
-
IIRProcessor* GetIIRFilterProcessor() const;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc
index f9b9f8707ca..653b2cfe862 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc
@@ -10,7 +10,7 @@
namespace blink {
IIRProcessor::IIRProcessor(float sample_rate,
- size_t number_of_channels,
+ uint32_t number_of_channels,
const Vector<double>& feedforward_coef,
const Vector<double>& feedback_coef,
bool is_filter_stable)
@@ -66,7 +66,7 @@ std::unique_ptr<AudioDSPKernel> IIRProcessor::CreateKernel() {
void IIRProcessor::Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
if (!IsInitialized()) {
destination->Zero();
return;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h
index e55bf919c2f..a2160eebdc8 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h
@@ -18,7 +18,7 @@ class IIRDSPKernel;
class IIRProcessor final : public AudioDSPKernelProcessor {
public:
IIRProcessor(float sample_rate,
- size_t number_of_channels,
+ uint32_t number_of_channels,
const Vector<double>& feedforward_coef,
const Vector<double>& feedback_coef,
bool is_filter_stable);
@@ -28,7 +28,7 @@ class IIRProcessor final : public AudioDSPKernelProcessor {
void Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc
index 6aadf7830ca..b24849b8934 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc
@@ -17,7 +17,7 @@ IIRDSPKernel::IIRDSPKernel(IIRProcessor* processor)
void IIRDSPKernel::Process(const float* source,
float* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(source);
DCHECK(destination);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h
index bdf62ff185c..03979a3cfc0 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h
@@ -20,7 +20,7 @@ class IIRDSPKernel final : public AudioDSPKernel {
// AudioDSPKernel
void Process(const float* source,
float* dest,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
void Reset() override { iir_.Reset(); }
// Get the magnitude and phase response of the filter at the given
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc
index d7b07b5fea7..3e5064496d7 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc
@@ -83,19 +83,19 @@ void MediaElementAudioSourceHandler::Dispose() {
AudioHandler::Dispose();
}
-void MediaElementAudioSourceHandler::SetFormat(size_t number_of_channels,
+void MediaElementAudioSourceHandler::SetFormat(uint32_t number_of_channels,
float source_sample_rate) {
bool is_tainted = WouldTaintOrigin();
if (is_tainted) {
- PrintCORSMessage(MediaElement()->currentSrc().GetString());
+ PrintCorsMessage(MediaElement()->currentSrc().GetString());
}
if (number_of_channels != source_number_of_channels_ ||
source_sample_rate != source_sample_rate_) {
if (!number_of_channels ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
- !AudioUtilities::IsValidAudioBufferSampleRate(source_sample_rate)) {
+ !audio_utilities::IsValidAudioBufferSampleRate(source_sample_rate)) {
// process() will generate silence for these uninitialized values.
DLOG(ERROR) << "setFormat(" << number_of_channels << ", "
<< source_sample_rate << ") - unhandled format change";
@@ -139,7 +139,7 @@ bool MediaElementAudioSourceHandler::WouldTaintOrigin() {
return MediaElement()->GetWebMediaPlayer()->WouldTaintOrigin();
}
-void MediaElementAudioSourceHandler::PrintCORSMessage(const String& message) {
+void MediaElementAudioSourceHandler::PrintCorsMessage(const String& message) {
if (Context()->GetExecutionContext()) {
Context()->GetExecutionContext()->AddConsoleMessage(
ConsoleMessage::Create(kSecurityMessageSource, kInfoMessageLevel,
@@ -149,7 +149,7 @@ void MediaElementAudioSourceHandler::PrintCORSMessage(const String& message) {
}
}
-void MediaElementAudioSourceHandler::Process(size_t number_of_frames) {
+void MediaElementAudioSourceHandler::Process(uint32_t number_of_frames) {
AudioBus* output_bus = Output(0).Bus();
// Use a tryLock() to avoid contention in the real-time audio thread.
@@ -232,7 +232,7 @@ MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
}
MediaElementAudioSourceNode* node =
- new MediaElementAudioSourceNode(context, media_element);
+ MakeGarbageCollected<MediaElementAudioSourceNode>(context, media_element);
if (node) {
media_element.SetAudioSourceNode(node);
@@ -250,9 +250,9 @@ MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
AudioContext* context,
- const MediaElementAudioSourceOptions& options,
+ const MediaElementAudioSourceOptions* options,
ExceptionState& exception_state) {
- return Create(*context, *options.mediaElement(), exception_state);
+ return Create(*context, *options->mediaElement(), exception_state);
}
void MediaElementAudioSourceNode::Trace(blink::Visitor* visitor) {
@@ -269,7 +269,7 @@ HTMLMediaElement* MediaElementAudioSourceNode::mediaElement() const {
return GetMediaElementAudioSourceHandler().MediaElement();
}
-void MediaElementAudioSourceNode::SetFormat(size_t number_of_channels,
+void MediaElementAudioSourceNode::SetFormat(uint32_t number_of_channels,
float sample_rate) {
GetMediaElementAudioSourceHandler().SetFormat(number_of_channels,
sample_rate);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h
index 9fbb8127720..74599d3fc76 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h
@@ -53,7 +53,7 @@ class MediaElementAudioSourceHandler final : public AudioHandler {
// AudioHandler
void Dispose() override;
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
// AudioNode
double TailTime() const override { return 0; }
@@ -61,7 +61,7 @@ class MediaElementAudioSourceHandler final : public AudioHandler {
// Helpers for AudioSourceProviderClient implementation of
// MediaElementAudioSourceNode.
- void SetFormat(size_t number_of_channels, float sample_rate);
+ void SetFormat(uint32_t number_of_channels, float sample_rate);
void lock() EXCLUSIVE_LOCK_FUNCTION(GetProcessLock());
void unlock() UNLOCK_FUNCTION(GetProcessLock());
@@ -84,7 +84,7 @@ class MediaElementAudioSourceHandler final : public AudioHandler {
// Print warning if CORS restrictions cause MediaElementAudioSource to output
// zeroes.
- void PrintCORSMessage(const String& message);
+ void PrintCorsMessage(const String& message);
// This Persistent doesn't make a reference cycle. The reference from
// HTMLMediaElement to AudioSourceProvideClient, which
@@ -119,7 +119,9 @@ class MediaElementAudioSourceNode final : public AudioNode,
HTMLMediaElement&,
ExceptionState&);
static MediaElementAudioSourceNode*
- Create(AudioContext*, const MediaElementAudioSourceOptions&, ExceptionState&);
+ Create(AudioContext*, const MediaElementAudioSourceOptions*, ExceptionState&);
+
+ MediaElementAudioSourceNode(AudioContext&, HTMLMediaElement&);
void Trace(blink::Visitor*) override;
MediaElementAudioSourceHandler& GetMediaElementAudioSourceHandler() const;
@@ -127,14 +129,11 @@ class MediaElementAudioSourceNode final : public AudioNode,
HTMLMediaElement* mediaElement() const;
// AudioSourceProviderClient functions:
- void SetFormat(size_t number_of_channels, float sample_rate) override;
+ void SetFormat(uint32_t number_of_channels, float sample_rate) override;
void lock() override EXCLUSIVE_LOCK_FUNCTION(
GetMediaElementAudioSourceHandler().GetProcessLock());
void unlock() override
UNLOCK_FUNCTION(GetMediaElementAudioSourceHandler().GetProcessLock());
-
- private:
- MediaElementAudioSourceNode(AudioContext&, HTMLMediaElement&);
};
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc
index 31f983cea2b..5dfad803f08 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc
@@ -40,17 +40,17 @@ namespace blink {
// WebAudioCapturerSource ignores the channel count beyond 8, so we set the
// limit here to avoid anything that could cause a crash.
-static const unsigned long kMaxChannelCount = 8;
+static const uint32_t kMaxChannelCount = 8;
MediaStreamAudioDestinationHandler::MediaStreamAudioDestinationHandler(
AudioNode& node,
- size_t number_of_channels)
+ uint32_t number_of_channels)
: AudioBasicInspectorHandler(kNodeTypeMediaStreamAudioDestination,
node,
node.context()->sampleRate(),
number_of_channels),
mix_bus_(AudioBus::Create(number_of_channels,
- AudioUtilities::kRenderQuantumFrames)) {
+ audio_utilities::kRenderQuantumFrames)) {
source_ = MediaStreamSource::Create("WebAudio-" + CreateCanonicalUUIDString(),
MediaStreamSource::kTypeAudio,
"MediaStreamAudioDestinationNode", false,
@@ -72,7 +72,7 @@ MediaStreamAudioDestinationHandler::MediaStreamAudioDestinationHandler(
scoped_refptr<MediaStreamAudioDestinationHandler>
MediaStreamAudioDestinationHandler::Create(AudioNode& node,
- size_t number_of_channels) {
+ uint32_t number_of_channels) {
return base::AdoptRef(
new MediaStreamAudioDestinationHandler(node, number_of_channels));
}
@@ -81,7 +81,7 @@ MediaStreamAudioDestinationHandler::~MediaStreamAudioDestinationHandler() {
Uninitialize();
}
-void MediaStreamAudioDestinationHandler::Process(size_t number_of_frames) {
+void MediaStreamAudioDestinationHandler::Process(uint32_t number_of_frames) {
// Conform the input bus into the internal mix bus, which represents
// MediaStreamDestination's channel count.
@@ -95,7 +95,7 @@ void MediaStreamAudioDestinationHandler::Process(size_t number_of_frames) {
if (try_locker.Locked()) {
unsigned count = ChannelCount();
if (count != mix_bus_->NumberOfChannels()) {
- mix_bus_ = AudioBus::Create(count, AudioUtilities::kRenderQuantumFrames);
+ mix_bus_ = AudioBus::Create(count, audio_utilities::kRenderQuantumFrames);
// setAudioFormat has an internal lock. This can cause audio to
// glitch. This is outside of our control.
source_->SetAudioFormat(count, Context()->sampleRate());
@@ -110,7 +110,7 @@ void MediaStreamAudioDestinationHandler::Process(size_t number_of_frames) {
}
void MediaStreamAudioDestinationHandler::SetChannelCount(
- unsigned long channel_count,
+ unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -135,7 +135,7 @@ void MediaStreamAudioDestinationHandler::SetChannelCount(
AudioHandler::SetChannelCount(channel_count, exception_state);
}
-unsigned long MediaStreamAudioDestinationHandler::MaxChannelCount() const {
+uint32_t MediaStreamAudioDestinationHandler::MaxChannelCount() const {
return kMaxChannelCount;
}
@@ -143,7 +143,7 @@ unsigned long MediaStreamAudioDestinationHandler::MaxChannelCount() const {
MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(
AudioContext& context,
- size_t number_of_channels)
+ uint32_t number_of_channels)
: AudioBasicInspectorNode(context) {
SetHandler(
MediaStreamAudioDestinationHandler::Create(*this, number_of_channels));
@@ -151,7 +151,7 @@ MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(
MediaStreamAudioDestinationNode* MediaStreamAudioDestinationNode::Create(
AudioContext& context,
- size_t number_of_channels,
+ uint32_t number_of_channels,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
@@ -160,25 +160,26 @@ MediaStreamAudioDestinationNode* MediaStreamAudioDestinationNode::Create(
return nullptr;
}
- return new MediaStreamAudioDestinationNode(context, number_of_channels);
+ return MakeGarbageCollected<MediaStreamAudioDestinationNode>(
+ context, number_of_channels);
}
MediaStreamAudioDestinationNode* MediaStreamAudioDestinationNode::Create(
AudioContext* context,
- const AudioNodeOptions& options,
+ const AudioNodeOptions* options,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
// Default to stereo; |options| will update it appropriately if needed.
MediaStreamAudioDestinationNode* node =
- new MediaStreamAudioDestinationNode(*context, 2);
+ MakeGarbageCollected<MediaStreamAudioDestinationNode>(*context, 2);
// Need to handle channelCount here ourselves because the upper
// limit is different from the normal AudioNode::setChannelCount
// limit of 32. Error messages will sometimes show the wrong
// limits.
- if (options.hasChannelCount())
- node->setChannelCount(options.channelCount(), exception_state);
+ if (options->hasChannelCount())
+ node->setChannelCount(options->channelCount(), exception_state);
node->HandleChannelOptions(options, exception_state);
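
MediaStreamAudioDestinationNode handles channelCount itself because its upper limit (kMaxChannelCount, 8) is lower than the usual AudioNode limit of 32. Below is a self-contained sketch of that clamped setter; error reporting is reduced to a return value here, whereas the real handler reports failures through ExceptionState.

#include <cstdint>
#include <iostream>

constexpr uint32_t kMaxChannelCount = 8;  // limit from the handler above

// Returns false when the requested count is outside [1, kMaxChannelCount];
// the real SetChannelCount() surfaces this through ExceptionState instead.
bool SetChannelCountSketch(uint32_t requested, uint32_t* channel_count) {
  if (requested < 1 || requested > kMaxChannelCount)
    return false;
  *channel_count = requested;
  return true;
}

int main() {
  uint32_t channel_count = 2;  // the factory above defaults to stereo
  if (!SetChannelCountSketch(16, &channel_count))
    std::cout << "channelCount above " << kMaxChannelCount << " rejected\n";
  std::cout << "channel_count = " << channel_count << "\n";  // still 2
  return 0;
}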
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h
index 427d924d5d4..78bbf3dfacc 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h
@@ -40,21 +40,21 @@ class MediaStreamAudioDestinationHandler final
public:
static scoped_refptr<MediaStreamAudioDestinationHandler> Create(
AudioNode&,
- size_t number_of_channels);
+ uint32_t number_of_channels);
~MediaStreamAudioDestinationHandler() override;
MediaStream* Stream() { return stream_.Get(); }
// AudioHandler.
- void Process(size_t frames_to_process) override;
- void SetChannelCount(unsigned long, ExceptionState&) override;
+ void Process(uint32_t frames_to_process) override;
+ void SetChannelCount(unsigned, ExceptionState&) override;
- unsigned long MaxChannelCount() const;
+ uint32_t MaxChannelCount() const;
bool RequiresTailProcessing() const final { return false; }
private:
- MediaStreamAudioDestinationHandler(AudioNode&, size_t number_of_channels);
+ MediaStreamAudioDestinationHandler(AudioNode&, uint32_t number_of_channels);
// As an audio source, we will never propagate silence.
bool PropagatesSilence() const override { return false; }
@@ -79,16 +79,15 @@ class MediaStreamAudioDestinationNode final : public AudioBasicInspectorNode {
public:
static MediaStreamAudioDestinationNode* Create(AudioContext&,
- size_t number_of_channels,
+ uint32_t number_of_channels,
ExceptionState&);
static MediaStreamAudioDestinationNode* Create(AudioContext*,
- const AudioNodeOptions&,
+ const AudioNodeOptions*,
ExceptionState&);
- MediaStream* stream() const;
+ MediaStreamAudioDestinationNode(AudioContext&, uint32_t number_of_channels);
- private:
- MediaStreamAudioDestinationNode(AudioContext&, size_t number_of_channels);
+ MediaStream* stream() const;
};
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc
index 488a5380262..35c7dbbc061 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc
@@ -61,7 +61,7 @@ MediaStreamAudioSourceHandler::~MediaStreamAudioSourceHandler() {
Uninitialize();
}
-void MediaStreamAudioSourceHandler::SetFormat(size_t number_of_channels,
+void MediaStreamAudioSourceHandler::SetFormat(uint32_t number_of_channels,
float source_sample_rate) {
if (number_of_channels != source_number_of_channels_ ||
source_sample_rate != Context()->sampleRate()) {
@@ -91,7 +91,7 @@ void MediaStreamAudioSourceHandler::SetFormat(size_t number_of_channels,
}
}
-void MediaStreamAudioSourceHandler::Process(size_t number_of_frames) {
+void MediaStreamAudioSourceHandler::Process(uint32_t number_of_frames) {
AudioBus* output_bus = Output(0).Bus();
if (!GetAudioSourceProvider()) {
@@ -153,8 +153,9 @@ MediaStreamAudioSourceNode* MediaStreamAudioSourceNode::Create(
std::unique_ptr<AudioSourceProvider> provider =
audio_track->CreateWebAudioSource();
- MediaStreamAudioSourceNode* node = new MediaStreamAudioSourceNode(
- context, media_stream, audio_track, std::move(provider));
+ MediaStreamAudioSourceNode* node =
+ MakeGarbageCollected<MediaStreamAudioSourceNode>(
+ context, media_stream, audio_track, std::move(provider));
if (!node)
return nullptr;
@@ -170,9 +171,9 @@ MediaStreamAudioSourceNode* MediaStreamAudioSourceNode::Create(
MediaStreamAudioSourceNode* MediaStreamAudioSourceNode::Create(
AudioContext* context,
- const MediaStreamAudioSourceOptions& options,
+ const MediaStreamAudioSourceOptions* options,
ExceptionState& exception_state) {
- return Create(*context, *options.mediaStream(), exception_state);
+ return Create(*context, *options->mediaStream(), exception_state);
}
void MediaStreamAudioSourceNode::Trace(blink::Visitor* visitor) {
@@ -191,7 +192,7 @@ MediaStream* MediaStreamAudioSourceNode::getMediaStream() const {
return media_stream_;
}
-void MediaStreamAudioSourceNode::SetFormat(size_t number_of_channels,
+void MediaStreamAudioSourceNode::SetFormat(uint32_t number_of_channels,
float source_sample_rate) {
GetMediaStreamAudioSourceHandler().SetFormat(number_of_channels,
source_sample_rate);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h
index 21bd3e48818..6f1b4de8dd2 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h
@@ -47,7 +47,7 @@ class MediaStreamAudioSourceHandler final : public AudioHandler {
~MediaStreamAudioSourceHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
// AudioNode
double TailTime() const override { return 0; }
@@ -55,7 +55,7 @@ class MediaStreamAudioSourceHandler final : public AudioHandler {
// A helper for AudioSourceProviderClient implementation of
// MediaStreamAudioSourceNode.
- void SetFormat(size_t number_of_channels, float sample_rate);
+ void SetFormat(uint32_t number_of_channels, float sample_rate);
bool RequiresTailProcessing() const final { return false; }
@@ -87,21 +87,21 @@ class MediaStreamAudioSourceNode final : public AudioNode,
MediaStream&,
ExceptionState&);
static MediaStreamAudioSourceNode*
- Create(AudioContext*, const MediaStreamAudioSourceOptions&, ExceptionState&);
+ Create(AudioContext*, const MediaStreamAudioSourceOptions*, ExceptionState&);
+
+ MediaStreamAudioSourceNode(AudioContext&,
+ MediaStream&,
+ MediaStreamTrack*,
+ std::unique_ptr<AudioSourceProvider>);
void Trace(blink::Visitor*) override;
MediaStream* getMediaStream() const;
// AudioSourceProviderClient functions:
- void SetFormat(size_t number_of_channels, float sample_rate) override;
+ void SetFormat(uint32_t number_of_channels, float sample_rate) override;
private:
- MediaStreamAudioSourceNode(AudioContext&,
- MediaStream&,
- MediaStreamTrack*,
- std::unique_ptr<AudioSourceProvider>);
-
MediaStreamAudioSourceHandler& GetMediaStreamAudioSourceHandler() const;
Member<MediaStreamTrack> audio_track_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc
index 29ae509469e..9b33de63654 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc
@@ -30,38 +30,39 @@
namespace blink {
OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create() {
- return new OfflineAudioCompletionEvent;
+ return MakeGarbageCollected<OfflineAudioCompletionEvent>();
}
OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create(
AudioBuffer* rendered_buffer) {
- return new OfflineAudioCompletionEvent(rendered_buffer);
+ return MakeGarbageCollected<OfflineAudioCompletionEvent>(rendered_buffer);
}
OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create(
const AtomicString& event_type,
- const OfflineAudioCompletionEventInit& event_init) {
- return new OfflineAudioCompletionEvent(event_type, event_init);
+ const OfflineAudioCompletionEventInit* event_init) {
+ return MakeGarbageCollected<OfflineAudioCompletionEvent>(event_type,
+ event_init);
}
OfflineAudioCompletionEvent::OfflineAudioCompletionEvent() = default;
OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(
AudioBuffer* rendered_buffer)
- : Event(EventTypeNames::complete, Bubbles::kYes, Cancelable::kNo),
+ : Event(event_type_names::kComplete, Bubbles::kYes, Cancelable::kNo),
rendered_buffer_(rendered_buffer) {}
OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(
const AtomicString& event_type,
- const OfflineAudioCompletionEventInit& event_init)
+ const OfflineAudioCompletionEventInit* event_init)
: Event(event_type, event_init) {
- rendered_buffer_ = event_init.renderedBuffer();
+ rendered_buffer_ = event_init->renderedBuffer();
}
OfflineAudioCompletionEvent::~OfflineAudioCompletionEvent() = default;
const AtomicString& OfflineAudioCompletionEvent::InterfaceName() const {
- return EventNames::OfflineAudioCompletionEvent;
+ return event_interface_names::kOfflineAudioCompletionEvent;
}
void OfflineAudioCompletionEvent::Trace(blink::Visitor* visitor) {
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h
index e08e9909ee1..f897a794725 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h
@@ -44,8 +44,12 @@ class OfflineAudioCompletionEvent final : public Event {
static OfflineAudioCompletionEvent* Create(AudioBuffer* rendered_buffer);
static OfflineAudioCompletionEvent* Create(
const AtomicString& type,
- const OfflineAudioCompletionEventInit&);
+ const OfflineAudioCompletionEventInit*);
+ OfflineAudioCompletionEvent();
+ explicit OfflineAudioCompletionEvent(AudioBuffer* rendered_buffer);
+ explicit OfflineAudioCompletionEvent(const AtomicString& type,
+ const OfflineAudioCompletionEventInit*);
~OfflineAudioCompletionEvent() override;
AudioBuffer* renderedBuffer() { return rendered_buffer_.Get(); }
@@ -55,11 +59,6 @@ class OfflineAudioCompletionEvent final : public Event {
void Trace(blink::Visitor*) override;
private:
- OfflineAudioCompletionEvent();
- explicit OfflineAudioCompletionEvent(AudioBuffer* rendered_buffer);
- explicit OfflineAudioCompletionEvent(const AtomicString& type,
- const OfflineAudioCompletionEventInit&);
-
Member<AudioBuffer> rendered_buffer_;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc
index ba5601767ff..5c447b8cbb0 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc
@@ -77,21 +77,22 @@ OfflineAudioContext* OfflineAudioContext::Create(
return nullptr;
}
- if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
+ if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate)) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"sampleRate", sample_rate,
- AudioUtilities::MinAudioBufferSampleRate(),
+ audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
- AudioUtilities::MaxAudioBufferSampleRate(),
+ audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return nullptr;
}
OfflineAudioContext* audio_context =
- new OfflineAudioContext(document, number_of_channels, number_of_frames,
- sample_rate, exception_state);
+ MakeGarbageCollected<OfflineAudioContext>(document, number_of_channels,
+ number_of_frames, sample_rate,
+ exception_state);
audio_context->PauseIfNeeded();
#if DEBUG_AUDIONODE_REFERENCES
@@ -106,8 +107,8 @@ OfflineAudioContext* OfflineAudioContext::Create(
("WebAudio.OfflineAudioContext.Length", 1, 1000000, 50));
// The limits are the min and max AudioBuffer sample rates currently
// supported. We use explicit values here instead of
- // AudioUtilities::minAudioBufferSampleRate() and
- // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
+ // audio_utilities::minAudioBufferSampleRate() and
+ // audio_utilities::maxAudioBufferSampleRate(). The number of buckets is
// fairly arbitrary.
DEFINE_STATIC_LOCAL(
CustomCountHistogram, offline_context_sample_rate_histogram,
@@ -122,18 +123,18 @@ OfflineAudioContext* OfflineAudioContext::Create(
OfflineAudioContext* OfflineAudioContext::Create(
ExecutionContext* context,
- const OfflineAudioContextOptions& options,
+ const OfflineAudioContextOptions* options,
ExceptionState& exception_state) {
OfflineAudioContext* offline_context =
- Create(context, options.numberOfChannels(), options.length(),
- options.sampleRate(), exception_state);
+ Create(context, options->numberOfChannels(), options->length(),
+ options->sampleRate(), exception_state);
return offline_context;
}
OfflineAudioContext::OfflineAudioContext(Document* document,
unsigned number_of_channels,
- size_t number_of_frames,
+ uint32_t number_of_frames,
float sample_rate,
ExceptionState& exception_state)
: BaseAudioContext(document, kOfflineContext),
@@ -267,7 +268,8 @@ ScriptPromise OfflineAudioContext::suspendContext(ScriptState* script_state,
// The specified suspend time is in the past; reject the promise.
if (frame < CurrentSampleFrame()) {
- size_t current_frame_clamped = std::min(CurrentSampleFrame(), length());
+ size_t current_frame_clamped =
+ std::min(CurrentSampleFrame(), static_cast<size_t>(length()));
double current_time_clamped =
std::min(currentTime(), length() / static_cast<double>(sampleRate()));
resolver->Reject(DOMException::Create(
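
The added static_cast in suspendContext is a direct consequence of length() changing from size_t to uint32_t: std::min deduces a single template argument, so both operands must have the same type. A self-contained sketch of the clamp:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  std::size_t current_sample_frame = 1000000;  // e.g. CurrentSampleFrame()
  uint32_t length = 44100;                     // e.g. length(), now uint32_t

  // std::min(current_sample_frame, length) would not compile: the operands
  // deduce different types, so one side is cast explicitly.
  std::size_t current_frame_clamped =
      std::min(current_sample_frame, static_cast<std::size_t>(length));

  std::cout << current_frame_clamped << "\n";  // prints 44100
  return 0;
}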
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h
index 17e47893dc1..22872f75c52 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h
@@ -47,14 +47,19 @@ class MODULES_EXPORT OfflineAudioContext final : public BaseAudioContext {
ExceptionState&);
static OfflineAudioContext* Create(ExecutionContext*,
- const OfflineAudioContextOptions&,
+ const OfflineAudioContextOptions*,
ExceptionState&);
+ OfflineAudioContext(Document*,
+ unsigned number_of_channels,
+ uint32_t number_of_frames,
+ float sample_rate,
+ ExceptionState&);
~OfflineAudioContext() override;
void Trace(blink::Visitor*) override;
- size_t length() const { return total_render_frames_; }
+ uint32_t length() const { return total_render_frames_; }
ScriptPromise startOfflineRendering(ScriptState*);
@@ -65,7 +70,7 @@ class MODULES_EXPORT OfflineAudioContext final : public BaseAudioContext {
bool HasRealtimeConstraint() final { return false; }
- DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
+ DEFINE_ATTRIBUTE_EVENT_LISTENER(complete, kComplete);
// Fire completion event when the rendering is finished.
void FireCompletionEvent();
@@ -97,12 +102,6 @@ class MODULES_EXPORT OfflineAudioContext final : public BaseAudioContext {
bool HasPendingActivity() const final;
private:
- OfflineAudioContext(Document*,
- unsigned number_of_channels,
- size_t number_of_frames,
- float sample_rate,
- ExceptionState&);
-
// Fetch directly the destination handler.
OfflineAudioDestinationHandler& DestinationHandler();
@@ -130,7 +129,7 @@ class MODULES_EXPORT OfflineAudioContext final : public BaseAudioContext {
bool is_rendering_started_;
// Total render sample length.
- size_t total_render_frames_;
+ uint32_t total_render_frames_;
};
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc
index fd5e12d5934..e854d861998 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc
@@ -44,7 +44,7 @@ namespace blink {
OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(
AudioNode& node,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate)
: AudioDestinationHandler(node),
render_target_(nullptr),
@@ -68,7 +68,7 @@ OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(
scoped_refptr<OfflineAudioDestinationHandler>
OfflineAudioDestinationHandler::Create(AudioNode& node,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate) {
return base::AdoptRef(new OfflineAudioDestinationHandler(
node, number_of_channels, frames_to_process, sample_rate));
@@ -103,7 +103,7 @@ OfflineAudioContext* OfflineAudioDestinationHandler::Context() const {
return static_cast<OfflineAudioContext*>(AudioDestinationHandler::Context());
}
-unsigned long OfflineAudioDestinationHandler::MaxChannelCount() const {
+uint32_t OfflineAudioDestinationHandler::MaxChannelCount() const {
return channel_count_;
}
@@ -141,7 +141,7 @@ void OfflineAudioDestinationHandler::InitializeOfflineRenderThread(
render_target_ = render_target;
render_bus_ = AudioBus::Create(render_target->numberOfChannels(),
- AudioUtilities::kRenderQuantumFrames);
+ audio_utilities::kRenderQuantumFrames);
DCHECK(render_bus_);
PrepareTaskRunnerForRendering();
@@ -166,7 +166,7 @@ void OfflineAudioDestinationHandler::StartOfflineRendering() {
return;
bool is_render_bus_allocated =
- render_bus_->length() >= AudioUtilities::kRenderQuantumFrames;
+ render_bus_->length() >= audio_utilities::kRenderQuantumFrames;
DCHECK(is_render_bus_allocated);
if (!is_render_bus_allocated)
return;
@@ -209,12 +209,11 @@ void OfflineAudioDestinationHandler::DoOfflineRendering() {
// Suspend the rendering if a scheduled suspend is found at the current
// sample frame. Otherwise render one quantum.
if (RenderIfNotSuspended(nullptr, render_bus_.get(),
- AudioUtilities::kRenderQuantumFrames))
+ audio_utilities::kRenderQuantumFrames))
return;
- size_t frames_available_to_copy =
- std::min(frames_to_process_,
- static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
+ uint32_t frames_available_to_copy =
+ std::min(frames_to_process_, audio_utilities::kRenderQuantumFrames);
for (unsigned channel_index = 0; channel_index < number_of_channels;
++channel_index) {
@@ -279,7 +278,7 @@ void OfflineAudioDestinationHandler::NotifyComplete() {
bool OfflineAudioDestinationHandler::RenderIfNotSuspended(
AudioBus* source_bus,
AudioBus* destination_bus,
- size_t number_of_frames) {
+ uint32_t number_of_frames) {
// We don't want denormals slowing down any of the audio processing
// since they can very seriously hurt performance.
// This will take care of all AudioNodes because they all process within this
@@ -340,8 +339,7 @@ bool OfflineAudioDestinationHandler::RenderIfNotSuspended(
Context()->HandlePostOfflineRenderTasks();
// Advance current sample-frame.
- size_t new_sample_frame = current_sample_frame_ + number_of_frames;
- ReleaseStore(&current_sample_frame_, new_sample_frame);
+ AdvanceCurrentSampleFrame(number_of_frames);
Context()->UpdateWorkletGlobalScopeOnRenderingThread();
@@ -395,7 +393,7 @@ void OfflineAudioDestinationHandler::RestartRendering() {
OfflineAudioDestinationNode::OfflineAudioDestinationNode(
BaseAudioContext& context,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate)
: AudioDestinationNode(context) {
SetHandler(OfflineAudioDestinationHandler::Create(
@@ -405,10 +403,10 @@ OfflineAudioDestinationNode::OfflineAudioDestinationNode(
OfflineAudioDestinationNode* OfflineAudioDestinationNode::Create(
BaseAudioContext* context,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate) {
- return new OfflineAudioDestinationNode(*context, number_of_channels,
- frames_to_process, sample_rate);
+ return MakeGarbageCollected<OfflineAudioDestinationNode>(
+ *context, number_of_channels, frames_to_process, sample_rate);
}
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h
index dc658d2a2f7..df9ffd4edb5 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h
@@ -44,7 +44,7 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
static scoped_refptr<OfflineAudioDestinationHandler> Create(
AudioNode&,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate);
~OfflineAudioDestinationHandler() override;
@@ -62,14 +62,14 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
// AudioDestinationHandler
void StartRendering() override;
void StopRendering() override;
- unsigned long MaxChannelCount() const override;
+ uint32_t MaxChannelCount() const override;
void RestartRendering() override;
double SampleRate() const override { return sample_rate_; }
size_t RenderQuantumFrames() const {
- return AudioUtilities::kRenderQuantumFrames;
+ return audio_utilities::kRenderQuantumFrames;
}
// This is called when rendering of the offline context is started
@@ -86,7 +86,7 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
private:
OfflineAudioDestinationHandler(AudioNode&,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate);
// Set up the rendering and start. After setting the context up, it will
@@ -113,7 +113,7 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
// Otherwise, it returns false after rendering one quantum.
bool RenderIfNotSuspended(AudioBus* source_bus,
AudioBus* destination_bus,
- size_t number_of_frames);
+ uint32_t number_of_frames);
// Prepares a task runner for the rendering based on the operation mode
// (i.e. non-AudioWorklet or AudioWorklet). This is called when the
@@ -133,7 +133,7 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
// These variables are for counting the number of frames for the current
// progress and the remaining frames to be processed.
size_t frames_processed_;
- size_t frames_to_process_;
+ uint32_t frames_to_process_;
// This flag is necessary to distinguish the state of the context between
// 'created' and 'suspended'. If this flag is false and the current state
@@ -155,13 +155,12 @@ class OfflineAudioDestinationNode final : public AudioDestinationNode {
public:
static OfflineAudioDestinationNode* Create(BaseAudioContext*,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate);
- private:
OfflineAudioDestinationNode(BaseAudioContext&,
unsigned number_of_channels,
- size_t frames_to_process,
+ uint32_t frames_to_process,
float sample_rate);
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc
index c1f07b9f888..80fc5596d2c 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc
@@ -36,7 +36,7 @@
namespace blink {
-using namespace VectorMath;
+using namespace vector_math;
OscillatorHandler::OscillatorHandler(AudioNode& node,
float sample_rate,
@@ -49,8 +49,8 @@ OscillatorHandler::OscillatorHandler(AudioNode& node,
detune_(&detune),
first_render_(true),
virtual_read_index_(0),
- phase_increments_(AudioUtilities::kRenderQuantumFrames),
- detune_values_(AudioUtilities::kRenderQuantumFrames) {
+ phase_increments_(audio_utilities::kRenderQuantumFrames),
+ detune_values_(audio_utilities::kRenderQuantumFrames) {
if (wave_table) {
// A PeriodicWave overrides any value for the oscillator type,
// forcing the type to be 'custom'.
@@ -177,7 +177,7 @@ static void ClampFrequency(float* frequency,
}
bool OscillatorHandler::CalculateSampleAccuratePhaseIncrements(
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
bool is_good = frames_to_process <= phase_increments_.size() &&
frames_to_process <= detune_values_.size();
DCHECK(is_good);
@@ -354,7 +354,7 @@ static float DoInterpolation(double virtual_read_index,
return sample;
}
-void OscillatorHandler::Process(size_t frames_to_process) {
+void OscillatorHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
if (!IsInitialized() || !output_bus->NumberOfChannels()) {
@@ -382,7 +382,7 @@ void OscillatorHandler::Process(size_t frames_to_process) {
}
size_t quantum_frame_offset;
- size_t non_silent_frames_to_process;
+ uint32_t non_silent_frames_to_process;
double start_frame_offset;
std::tie(quantum_frame_offset, non_silent_frames_to_process,
@@ -529,29 +529,30 @@ OscillatorNode* OscillatorNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new OscillatorNode(context, oscillator_type, wave_table);
+ return MakeGarbageCollected<OscillatorNode>(context, oscillator_type,
+ wave_table);
}
OscillatorNode* OscillatorNode::Create(BaseAudioContext* context,
- const OscillatorOptions& options,
+ const OscillatorOptions* options,
ExceptionState& exception_state) {
- if (options.type() == "custom" && !options.hasPeriodicWave()) {
+ if (options->type() == "custom" && !options->hasPeriodicWave()) {
exception_state.ThrowDOMException(
DOMExceptionCode::kInvalidStateError,
"A PeriodicWave must be specified if the type is set to \"custom\"");
return nullptr;
}
- OscillatorNode* node =
- Create(*context, options.type(), options.periodicWave(), exception_state);
+ OscillatorNode* node = Create(*context, options->type(),
+ options->periodicWave(), exception_state);
if (!node)
return nullptr;
node->HandleChannelOptions(options, exception_state);
- node->detune()->setValue(options.detune());
- node->frequency()->setValue(options.frequency());
+ node->detune()->setValue(options->detune());
+ node->frequency()->setValue(options->frequency());
return node;
}
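
OscillatorNode::Create above rejects a "custom" type that arrives without a PeriodicWave. A self-contained sketch of that guard follows; the struct is a hypothetical stand-in for the OscillatorOptions dictionary, and the real code throws an InvalidStateError rather than printing a message.

#include <iostream>
#include <string>

struct OscillatorOptionsSketch {  // hypothetical stand-in for the IDL dictionary
  std::string type = "sine";
  bool has_periodic_wave = false;
};

bool ValidateOscillatorOptions(const OscillatorOptionsSketch& options) {
  if (options.type == "custom" && !options.has_periodic_wave) {
    // The real factory throws DOMExceptionCode::kInvalidStateError here.
    std::cout << "A PeriodicWave must be specified if the type is \"custom\"\n";
    return false;
  }
  return true;
}

int main() {
  OscillatorOptionsSketch options;
  options.type = "custom";  // no periodicWave supplied
  return ValidateOscillatorOptions(options) ? 0 : 1;
}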
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h
index 1270d42483c..a3840ecf957 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h
@@ -57,7 +57,7 @@ class OscillatorHandler final : public AudioScheduledSourceHandler {
~OscillatorHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
String GetType() const;
void SetType(const String&, ExceptionState&);
@@ -74,7 +74,7 @@ class OscillatorHandler final : public AudioScheduledSourceHandler {
bool SetType(unsigned); // Returns true on success.
// Returns true if there are sample-accurate timeline parameter changes.
- bool CalculateSampleAccuratePhaseIncrements(size_t frames_to_process);
+ bool CalculateSampleAccuratePhaseIncrements(uint32_t frames_to_process);
bool PropagatesSilence() const override;
@@ -113,8 +113,12 @@ class OscillatorNode final : public AudioScheduledSourceNode {
PeriodicWave* wave_table,
ExceptionState&);
static OscillatorNode* Create(BaseAudioContext*,
- const OscillatorOptions&,
+ const OscillatorOptions*,
ExceptionState&);
+
+ OscillatorNode(BaseAudioContext&,
+ const String& oscillator_type,
+ PeriodicWave* wave_table);
void Trace(blink::Visitor*) override;
String type() const;
@@ -124,9 +128,6 @@ class OscillatorNode final : public AudioScheduledSourceNode {
void setPeriodicWave(PeriodicWave*);
private:
- OscillatorNode(BaseAudioContext&,
- const String& oscillator_type,
- PeriodicWave* wave_table);
OscillatorHandler& GetOscillatorHandler() const;
Member<AudioParam> frequency_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc
index fac20df4477..3eca10bb952 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc
@@ -99,7 +99,7 @@ PannerHandler::~PannerHandler() {
Uninitialize();
}
-void PannerHandler::Process(size_t frames_to_process) {
+void PannerHandler::Process(uint32_t frames_to_process) {
AudioBus* destination = Output(0).Bus();
if (!IsInitialized() || !panner_.get()) {
@@ -162,18 +162,18 @@ void PannerHandler::Process(size_t frames_to_process) {
void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
const AudioBus* source,
- size_t frames_to_process) {
- CHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+ uint32_t frames_to_process) {
+ CHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
// Get the sample accurate values from all of the AudioParams, including the
// values from the AudioListener.
- float panner_x[AudioUtilities::kRenderQuantumFrames];
- float panner_y[AudioUtilities::kRenderQuantumFrames];
- float panner_z[AudioUtilities::kRenderQuantumFrames];
+ float panner_x[audio_utilities::kRenderQuantumFrames];
+ float panner_y[audio_utilities::kRenderQuantumFrames];
+ float panner_z[audio_utilities::kRenderQuantumFrames];
- float orientation_x[AudioUtilities::kRenderQuantumFrames];
- float orientation_y[AudioUtilities::kRenderQuantumFrames];
- float orientation_z[AudioUtilities::kRenderQuantumFrames];
+ float orientation_x[audio_utilities::kRenderQuantumFrames];
+ float orientation_y[audio_utilities::kRenderQuantumFrames];
+ float orientation_z[audio_utilities::kRenderQuantumFrames];
position_x_->CalculateSampleAccurateValues(panner_x, frames_to_process);
position_y_->CalculateSampleAccurateValues(panner_y, frames_to_process);
@@ -187,30 +187,30 @@ void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
// Get the automation values from the listener.
const float* listener_x =
- Listener()->GetPositionXValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetPositionXValues(audio_utilities::kRenderQuantumFrames);
const float* listener_y =
- Listener()->GetPositionYValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetPositionYValues(audio_utilities::kRenderQuantumFrames);
const float* listener_z =
- Listener()->GetPositionZValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetPositionZValues(audio_utilities::kRenderQuantumFrames);
const float* forward_x =
- Listener()->GetForwardXValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetForwardXValues(audio_utilities::kRenderQuantumFrames);
const float* forward_y =
- Listener()->GetForwardYValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetForwardYValues(audio_utilities::kRenderQuantumFrames);
const float* forward_z =
- Listener()->GetForwardZValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetForwardZValues(audio_utilities::kRenderQuantumFrames);
const float* up_x =
- Listener()->GetUpXValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetUpXValues(audio_utilities::kRenderQuantumFrames);
const float* up_y =
- Listener()->GetUpYValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetUpYValues(audio_utilities::kRenderQuantumFrames);
const float* up_z =
- Listener()->GetUpZValues(AudioUtilities::kRenderQuantumFrames);
+ Listener()->GetUpZValues(audio_utilities::kRenderQuantumFrames);
// Compute the azimuth, elevation, and total gains for each position.
- double azimuth[AudioUtilities::kRenderQuantumFrames];
- double elevation[AudioUtilities::kRenderQuantumFrames];
- float total_gain[AudioUtilities::kRenderQuantumFrames];
+ double azimuth[audio_utilities::kRenderQuantumFrames];
+ double elevation[audio_utilities::kRenderQuantumFrames];
+ float total_gain[audio_utilities::kRenderQuantumFrames];
for (unsigned k = 0; k < frames_to_process; ++k) {
FloatPoint3D panner_position(panner_x[k], panner_y[k], panner_z[k]);
@@ -235,10 +235,10 @@ void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
frames_to_process);
}
-void PannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
- float values[AudioUtilities::kRenderQuantumFrames];
+void PannerHandler::ProcessOnlyAudioParams(uint32_t frames_to_process) {
+ float values[audio_utilities::kRenderQuantumFrames];
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
position_x_->CalculateSampleAccurateValues(values, frames_to_process);
position_y_->CalculateSampleAccurateValues(values, frames_to_process);
@@ -577,7 +577,7 @@ void PannerHandler::MarkPannerAsDirty(unsigned dirty) {
is_distance_cone_gain_dirty_ = true;
}
-void PannerHandler::SetChannelCount(unsigned long channel_count,
+void PannerHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -714,11 +714,11 @@ PannerNode* PannerNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new PannerNode(context);
+ return MakeGarbageCollected<PannerNode>(context);
}
PannerNode* PannerNode::Create(BaseAudioContext* context,
- const PannerOptions& options,
+ const PannerOptions* options,
ExceptionState& exception_state) {
PannerNode* node = Create(*context, exception_state);
@@ -727,23 +727,23 @@ PannerNode* PannerNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- node->setPanningModel(options.panningModel());
- node->setDistanceModel(options.distanceModel());
+ node->setPanningModel(options->panningModel());
+ node->setDistanceModel(options->distanceModel());
- node->positionX()->setValue(options.positionX());
- node->positionY()->setValue(options.positionY());
- node->positionZ()->setValue(options.positionZ());
+ node->positionX()->setValue(options->positionX());
+ node->positionY()->setValue(options->positionY());
+ node->positionZ()->setValue(options->positionZ());
- node->orientationX()->setValue(options.orientationX());
- node->orientationY()->setValue(options.orientationY());
- node->orientationZ()->setValue(options.orientationZ());
+ node->orientationX()->setValue(options->orientationX());
+ node->orientationY()->setValue(options->orientationY());
+ node->orientationZ()->setValue(options->orientationZ());
- node->setRefDistance(options.refDistance(), exception_state);
- node->setMaxDistance(options.maxDistance(), exception_state);
- node->setRolloffFactor(options.rolloffFactor(), exception_state);
- node->setConeInnerAngle(options.coneInnerAngle());
- node->setConeOuterAngle(options.coneOuterAngle());
- node->setConeOuterGain(options.coneOuterGain(), exception_state);
+ node->setRefDistance(options->refDistance(), exception_state);
+ node->setMaxDistance(options->maxDistance(), exception_state);
+ node->setRolloffFactor(options->rolloffFactor(), exception_state);
+ node->setConeInnerAngle(options->coneInnerAngle());
+ node->setConeOuterAngle(options->coneOuterAngle());
+ node->setConeOuterGain(options->coneOuterGain(), exception_state);
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h
index 2e4851c6c7a..775241fbdc9 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h
@@ -71,11 +71,11 @@ class PannerHandler final : public AudioHandler {
~PannerHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
void ProcessSampleAccurateValues(AudioBus* destination,
const AudioBus* source,
- size_t frames_to_process);
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ uint32_t frames_to_process);
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
void Initialize() override;
void Uninitialize() override;
@@ -118,7 +118,7 @@ class PannerHandler final : public AudioHandler {
}
bool RequiresTailProcessing() const final;
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
private:
@@ -210,10 +210,12 @@ class PannerNode final : public AudioNode {
public:
static PannerNode* Create(BaseAudioContext&, ExceptionState&);
static PannerNode* Create(BaseAudioContext*,
- const PannerOptions&,
+ const PannerOptions*,
ExceptionState&);
PannerHandler& GetPannerHandler() const;
+ PannerNode(BaseAudioContext&);
+
void Trace(blink::Visitor*) override;
// Uses a 3D cartesian coordinate system
@@ -245,8 +247,6 @@ class PannerNode final : public AudioNode {
void setConeOuterGain(double, ExceptionState&);
private:
- PannerNode(BaseAudioContext&);
-
Member<AudioParam> position_x_;
Member<AudioParam> position_y_;
Member<AudioParam> position_z_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc
index 01568dbfa4c..d99e2f78af3 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc
@@ -49,7 +49,7 @@ const unsigned kMaxPeriodicWaveSize = 16384;
const float kCentsPerRange = 1200 / kNumberOfOctaveBands;
-using namespace VectorMath;
+using namespace vector_math;
PeriodicWave* PeriodicWave::Create(BaseAudioContext& context,
const Vector<float>& real,
@@ -72,29 +72,30 @@ PeriodicWave* PeriodicWave::Create(BaseAudioContext& context,
return nullptr;
}
- PeriodicWave* periodic_wave = new PeriodicWave(context.sampleRate());
+ PeriodicWave* periodic_wave =
+ MakeGarbageCollected<PeriodicWave>(context.sampleRate());
periodic_wave->CreateBandLimitedTables(real.data(), imag.data(), real.size(),
disable_normalization);
return periodic_wave;
}
PeriodicWave* PeriodicWave::Create(BaseAudioContext* context,
- const PeriodicWaveOptions& options,
+ const PeriodicWaveOptions* options,
ExceptionState& exception_state) {
- bool normalize = options.disableNormalization();
+ bool normalize = options->disableNormalization();
Vector<float> real_coef;
Vector<float> imag_coef;
- if (options.hasReal()) {
- real_coef = options.real();
- if (options.hasImag())
- imag_coef = options.imag();
+ if (options->hasReal()) {
+ real_coef = options->real();
+ if (options->hasImag())
+ imag_coef = options->imag();
else
imag_coef.resize(real_coef.size());
- } else if (options.hasImag()) {
+ } else if (options->hasImag()) {
// |real| not given, but we have |imag|.
- imag_coef = options.imag();
+ imag_coef = options->imag();
real_coef.resize(imag_coef.size());
} else {
// Neither |real| nor |imag| given. Return an object that would
@@ -108,25 +109,25 @@ PeriodicWave* PeriodicWave::Create(BaseAudioContext* context,
}
PeriodicWave* PeriodicWave::CreateSine(float sample_rate) {
- PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ PeriodicWave* periodic_wave = MakeGarbageCollected<PeriodicWave>(sample_rate);
periodic_wave->GenerateBasicWaveform(OscillatorHandler::SINE);
return periodic_wave;
}
PeriodicWave* PeriodicWave::CreateSquare(float sample_rate) {
- PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ PeriodicWave* periodic_wave = MakeGarbageCollected<PeriodicWave>(sample_rate);
periodic_wave->GenerateBasicWaveform(OscillatorHandler::SQUARE);
return periodic_wave;
}
PeriodicWave* PeriodicWave::CreateSawtooth(float sample_rate) {
- PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ PeriodicWave* periodic_wave = MakeGarbageCollected<PeriodicWave>(sample_rate);
periodic_wave->GenerateBasicWaveform(OscillatorHandler::SAWTOOTH);
return periodic_wave;
}
PeriodicWave* PeriodicWave::CreateTriangle(float sample_rate) {
- PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ PeriodicWave* periodic_wave = MakeGarbageCollected<PeriodicWave>(sample_rate);
periodic_wave->GenerateBasicWaveform(OscillatorHandler::TRIANGLE);
return periodic_wave;
}
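
The options-based PeriodicWave::Create keeps its original defaulting rule: if only |real| is supplied, |imag| is zero-filled to the same length, and vice versa; if neither is supplied, the code falls through to building a default waveform. A small standalone sketch of that rule, with std::vector standing in for WTF::Vector<float> and explicit booleans standing in for hasReal()/hasImag():

    #include <vector>

    struct Coefficients {
      std::vector<float> real;
      std::vector<float> imag;
    };

    Coefficients ResolveCoefficients(bool has_real, std::vector<float> real,
                                     bool has_imag, std::vector<float> imag) {
      Coefficients out;
      if (has_real) {
        out.real = std::move(real);
        if (has_imag)
          out.imag = std::move(imag);
        else
          out.imag.resize(out.real.size());   // zero-fill imag to match real
      } else if (has_imag) {
        out.imag = std::move(imag);
        out.real.resize(out.imag.size());     // zero-fill real to match imag
      }
      // Neither given: both stay empty; the real code then builds a default wave.
      return out;
    }

    int main() {
      Coefficients c = ResolveCoefficients(true, {0.f, 1.f, 0.5f}, false, {});
      return c.imag.size() == 3 ? 0 : 1;      // imag was zero-filled to length 3
    }
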
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h
index 1670f92ca4d..685213fefbd 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h
@@ -61,9 +61,10 @@ class PeriodicWave final : public ScriptWrappable {
ExceptionState&);
static PeriodicWave* Create(BaseAudioContext*,
- const PeriodicWaveOptions&,
+ const PeriodicWaveOptions*,
ExceptionState&);
+ explicit PeriodicWave(float sample_rate);
~PeriodicWave() override;
// Returns pointers to the lower and higher wave data for the pitch range
@@ -90,8 +91,6 @@ class PeriodicWave final : public ScriptWrappable {
unsigned NumberOfRanges() const { return number_of_ranges_; }
private:
- explicit PeriodicWave(float sample_rate);
-
void GenerateBasicWaveform(int);
size_t v8_external_memory_;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc
index e6a293d6e69..dbcee692acf 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc
@@ -49,7 +49,7 @@ const unsigned RealtimeAnalyser::kInputBufferSize =
RealtimeAnalyser::RealtimeAnalyser()
: input_buffer_(kInputBufferSize),
write_index_(0),
- down_mix_bus_(AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames)),
+ down_mix_bus_(AudioBus::Create(1, audio_utilities::kRenderQuantumFrames)),
fft_size_(kDefaultFFTSize),
magnitude_buffer_(kDefaultFFTSize / 2),
smoothing_time_constant_(kDefaultSmoothingTimeConstant),
@@ -59,12 +59,12 @@ RealtimeAnalyser::RealtimeAnalyser()
analysis_frame_ = std::make_unique<FFTFrame>(kDefaultFFTSize);
}
-bool RealtimeAnalyser::SetFftSize(size_t size) {
+bool RealtimeAnalyser::SetFftSize(uint32_t size) {
DCHECK(IsMainThread());
// Only allow powers of two within the allowed range.
if (size > kMaxFFTSize || size < kMinFFTSize ||
- !AudioUtilities::IsPowerOfTwo(size))
+ !audio_utilities::IsPowerOfTwo(size))
return false;
if (fft_size_ != size) {
@@ -78,7 +78,7 @@ bool RealtimeAnalyser::SetFftSize(size_t size) {
return true;
}
-void RealtimeAnalyser::WriteInput(AudioBus* bus, size_t frames_to_process) {
+void RealtimeAnalyser::WriteInput(AudioBus* bus, uint32_t frames_to_process) {
bool is_bus_good = bus && bus->NumberOfChannels() > 0 &&
bus->Channel(0)->length() >= frames_to_process;
DCHECK(is_bus_good);
@@ -136,7 +136,7 @@ void RealtimeAnalyser::DoFFTAnalysis() {
// Unroll the input buffer into a temporary buffer, where we'll apply an
// analysis window followed by an FFT.
- size_t fft_size = this->FftSize();
+ uint32_t fft_size = this->FftSize();
AudioFloatArray temporary_buffer(fft_size);
float* input_buffer = input_buffer_.Data();
@@ -198,7 +198,7 @@ void RealtimeAnalyser::ConvertFloatToDb(DOMFloat32Array* destination_array) {
for (unsigned i = 0; i < len; ++i) {
float linear_value = source[i];
- double db_mag = AudioUtilities::LinearToDecibels(linear_value);
+ double db_mag = audio_utilities::LinearToDecibels(linear_value);
destination[i] = float(db_mag);
}
}
@@ -236,7 +236,7 @@ void RealtimeAnalyser::ConvertToByteData(DOMUint8Array* destination_array) {
for (unsigned i = 0; i < len; ++i) {
float linear_value = source[i];
- double db_mag = AudioUtilities::LinearToDecibels(linear_value);
+ double db_mag = audio_utilities::LinearToDecibels(linear_value);
// The range m_minDecibels to m_maxDecibels will be scaled to byte values
// from 0 to UCHAR_MAX.
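
ConvertFloatToDb and ConvertToByteData only change which namespace LinearToDecibels comes from; the math is untouched. For reference, linear-to-decibel conversion and the minDecibels/maxDecibels byte scaling that AnalyserNode performs look roughly like the standalone sketch below (an approximation of the spec behaviour, not the Blink implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // 20*log10(x) for a linear magnitude; zero maps to a very small dB value
    // here instead of -infinity, purely to keep the sketch simple.
    double LinearToDecibels(float linear) {
      return linear > 0.f ? 20.0 * std::log10(linear) : -1000.0;
    }

    // Scale dB values in [min_db, max_db] onto bytes 0..255, as
    // getByteFrequencyData() does.
    std::vector<uint8_t> ToByteData(const std::vector<float>& magnitudes,
                                    double min_db, double max_db) {
      std::vector<uint8_t> out(magnitudes.size());
      const double range = max_db - min_db;
      for (size_t i = 0; i < magnitudes.size(); ++i) {
        double db = LinearToDecibels(magnitudes[i]);
        double scaled = 255.0 * (db - min_db) / range;
        out[i] = static_cast<uint8_t>(std::clamp(scaled, 0.0, 255.0));
      }
      return out;
    }

    int main() {
      std::vector<float> mags = {1.0f, 0.1f, 0.001f};
      std::vector<uint8_t> bytes = ToByteData(mags, -100.0, -30.0);
      return bytes[0] == 255 ? 0 : 1;   // 0 dB is above max_db, so it clamps to 255
    }
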
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h
index 295b28a4caa..9bd21a25363 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h
@@ -43,8 +43,8 @@ class RealtimeAnalyser final {
public:
RealtimeAnalyser();
- size_t FftSize() const { return fft_size_; }
- bool SetFftSize(size_t);
+ uint32_t FftSize() const { return fft_size_; }
+ bool SetFftSize(uint32_t);
unsigned FrequencyBinCount() const { return fft_size_ / 2; }
@@ -63,7 +63,7 @@ class RealtimeAnalyser final {
void GetByteTimeDomainData(DOMUint8Array*);
// The audio thread writes input data here.
- void WriteInput(AudioBus*, size_t frames_to_process);
+ void WriteInput(AudioBus*, uint32_t frames_to_process);
static const double kDefaultSmoothingTimeConstant;
static const double kDefaultMinDecibels;
@@ -87,7 +87,7 @@ class RealtimeAnalyser final {
// Input audio is downmixed to this bus before copying to m_inputBuffer.
scoped_refptr<AudioBus> down_mix_bus_;
- size_t fft_size_;
+ uint32_t fft_size_;
std::unique_ptr<FFTFrame> analysis_frame_;
void DoFFTAnalysis();
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc
index 7d6bce137ea..5c81ddbfaf1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc
@@ -38,8 +38,8 @@
#include "third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/waitable_event.h"
-#include "third_party/blink/renderer/platform/web_task_runner.h"
namespace blink {
@@ -51,19 +51,20 @@ ScriptProcessorHandler::ScriptProcessorHandler(
unsigned number_of_output_channels)
: AudioHandler(kNodeTypeScriptProcessor, node, sample_rate),
double_buffer_index_(0),
- input_buffers_(new HeapVector<Member<AudioBuffer>>()),
- output_buffers_(new HeapVector<Member<AudioBuffer>>()),
+ input_buffers_(MakeGarbageCollected<HeapVector<Member<AudioBuffer>>>()),
+ output_buffers_(MakeGarbageCollected<HeapVector<Member<AudioBuffer>>>()),
buffer_size_(buffer_size),
buffer_read_write_index_(0),
number_of_input_channels_(number_of_input_channels),
number_of_output_channels_(number_of_output_channels),
- internal_input_bus_(AudioBus::Create(number_of_input_channels,
- AudioUtilities::kRenderQuantumFrames,
- false)) {
+ internal_input_bus_(
+ AudioBus::Create(number_of_input_channels,
+ audio_utilities::kRenderQuantumFrames,
+ false)) {
// Regardless of the allowed buffer sizes, we still need to process at the
// granularity of the AudioNode.
- if (buffer_size_ < AudioUtilities::kRenderQuantumFrames)
- buffer_size_ = AudioUtilities::kRenderQuantumFrames;
+ if (buffer_size_ < audio_utilities::kRenderQuantumFrames)
+ buffer_size_ = audio_utilities::kRenderQuantumFrames;
DCHECK_LE(number_of_input_channels, BaseAudioContext::MaxNumberOfChannels());
@@ -124,7 +125,7 @@ void ScriptProcessorHandler::Initialize() {
AudioHandler::Initialize();
}
-void ScriptProcessorHandler::Process(size_t frames_to_process) {
+void ScriptProcessorHandler::Process(uint32_t frames_to_process) {
// Discussion about inputs and outputs:
// As in other AudioNodes, ScriptProcessorNode uses an AudioBus for its input
// and output (see inputBus and outputBus below). Additionally, there is a
@@ -333,7 +334,7 @@ double ScriptProcessorHandler::LatencyTime() const {
return std::numeric_limits<double>::infinity();
}
-void ScriptProcessorHandler::SetChannelCount(unsigned long channel_count,
+void ScriptProcessorHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -498,7 +499,7 @@ ScriptProcessorNode* ScriptProcessorNode::Create(
return nullptr;
}
- ScriptProcessorNode* node = new ScriptProcessorNode(
+ ScriptProcessorNode* node = MakeGarbageCollected<ScriptProcessorNode>(
context, context.sampleRate(), buffer_size, number_of_input_channels,
number_of_output_channels);
@@ -511,7 +512,7 @@ ScriptProcessorNode* ScriptProcessorNode::Create(
return node;
}
-size_t ScriptProcessorNode::bufferSize() const {
+uint32_t ScriptProcessorNode::bufferSize() const {
return static_cast<ScriptProcessorHandler&>(Handler()).BufferSize();
}
@@ -522,7 +523,7 @@ bool ScriptProcessorNode::HasPendingActivity() const {
// If |onaudioprocess| event handler is defined, the node should not be
// GCed even if it is out of scope.
- if (HasEventListeners(EventTypeNames::audioprocess))
+ if (HasEventListeners(event_type_names::kAudioprocess))
return true;
return false;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h
index 9e658ef1453..04161593dc1 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h
@@ -60,12 +60,12 @@ class ScriptProcessorHandler final : public AudioHandler {
~ScriptProcessorHandler() override;
// AudioHandler
- void Process(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
void Initialize() override;
- size_t BufferSize() const { return buffer_size_; }
+ uint32_t BufferSize() const { return static_cast<uint32_t>(buffer_size_); }
- void SetChannelCount(unsigned long, ExceptionState&) override;
+ void SetChannelCount(unsigned, ExceptionState&) override;
void SetChannelCountMode(const String&, ExceptionState&) override;
unsigned NumberOfOutputChannels() const override {
@@ -139,20 +139,19 @@ class ScriptProcessorNode final
unsigned number_of_output_channels,
ExceptionState&);
- DEFINE_ATTRIBUTE_EVENT_LISTENER(audioprocess);
- size_t bufferSize() const;
-
- // ScriptWrappable
- bool HasPendingActivity() const final;
-
- void Trace(blink::Visitor* visitor) override { AudioNode::Trace(visitor); }
-
- private:
ScriptProcessorNode(BaseAudioContext&,
float sample_rate,
size_t buffer_size,
unsigned number_of_input_channels,
unsigned number_of_output_channels);
+
+ DEFINE_ATTRIBUTE_EVENT_LISTENER(audioprocess, kAudioprocess);
+ uint32_t bufferSize() const;
+
+ // ScriptWrappable
+ bool HasPendingActivity() const final;
+
+ void Trace(blink::Visitor* visitor) override { AudioNode::Trace(visitor); }
};
} // namespace blink
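
ScriptProcessorHandler still raises any requested buffer size to at least one render quantum; only the constant's namespace and the integer types change. A trivial standalone version of that clamp, with the 128-frame render quantum written out as an assumption rather than taken from Blink:

    #include <cstdint>
    #include <iostream>

    // The render quantum is 128 frames in current WebAudio implementations;
    // stated here as an assumption, not a Blink constant.
    constexpr uint32_t kRenderQuantumFrames = 128;

    // Regardless of what the caller asked for, processing happens at least at
    // render-quantum granularity, so smaller buffer sizes are raised to 128.
    uint32_t ClampBufferSize(uint32_t requested) {
      return requested < kRenderQuantumFrames ? kRenderQuantumFrames : requested;
    }

    int main() {
      std::cout << ClampBufferSize(0) << " " << ClampBufferSize(256) << "\n";  // 128 256
    }
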
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc
index 9d01baaeb51..7e6305541b7 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc
@@ -21,7 +21,7 @@ StereoPannerHandler::StereoPannerHandler(AudioNode& node,
AudioParamHandler& pan)
: AudioHandler(kNodeTypeStereoPanner, node, sample_rate),
pan_(&pan),
- sample_accurate_pan_values_(AudioUtilities::kRenderQuantumFrames) {
+ sample_accurate_pan_values_(audio_utilities::kRenderQuantumFrames) {
AddInput();
AddOutput(2);
@@ -45,7 +45,7 @@ StereoPannerHandler::~StereoPannerHandler() {
Uninitialize();
}
-void StereoPannerHandler::Process(size_t frames_to_process) {
+void StereoPannerHandler::Process(uint32_t frames_to_process) {
AudioBus* output_bus = Output(0).Bus();
if (!IsInitialized() || !Input(0).IsConnected() || !stereo_panner_.get()) {
@@ -74,9 +74,9 @@ void StereoPannerHandler::Process(size_t frames_to_process) {
}
}
-void StereoPannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
- float values[AudioUtilities::kRenderQuantumFrames];
- DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+void StereoPannerHandler::ProcessOnlyAudioParams(uint32_t frames_to_process) {
+ float values[audio_utilities::kRenderQuantumFrames];
+ DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
pan_->CalculateSampleAccurateValues(values, frames_to_process);
}
@@ -90,7 +90,7 @@ void StereoPannerHandler::Initialize() {
AudioHandler::Initialize();
}
-void StereoPannerHandler::SetChannelCount(unsigned long channel_count,
+void StereoPannerHandler::SetChannelCount(unsigned channel_count,
ExceptionState& exception_state) {
DCHECK(IsMainThread());
BaseAudioContext::GraphAutoLocker locker(Context());
@@ -162,11 +162,11 @@ StereoPannerNode* StereoPannerNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new StereoPannerNode(context);
+ return MakeGarbageCollected<StereoPannerNode>(context);
}
StereoPannerNode* StereoPannerNode::Create(BaseAudioContext* context,
- const StereoPannerOptions& options,
+ const StereoPannerOptions* options,
ExceptionState& exception_state) {
StereoPannerNode* node = Create(*context, exception_state);
@@ -175,7 +175,7 @@ StereoPannerNode* StereoPannerNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- node->pan()->setValue(options.pan());
+ node->pan()->setValue(options->pan());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h
index b527220ee35..f03abe3633a 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h
@@ -26,11 +26,11 @@ class StereoPannerHandler final : public AudioHandler {
AudioParamHandler& pan);
~StereoPannerHandler() override;
- void Process(size_t frames_to_process) override;
- void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void Process(uint32_t frames_to_process) override;
+ void ProcessOnlyAudioParams(uint32_t frames_to_process) override;
void Initialize() override;
- void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCount(unsigned, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
double TailTime() const override { return 0; }
@@ -54,15 +54,16 @@ class StereoPannerNode final : public AudioNode {
public:
static StereoPannerNode* Create(BaseAudioContext&, ExceptionState&);
static StereoPannerNode* Create(BaseAudioContext*,
- const StereoPannerOptions&,
+ const StereoPannerOptions*,
ExceptionState&);
+
+ StereoPannerNode(BaseAudioContext&);
+
void Trace(blink::Visitor*) override;
AudioParam* pan() const;
private:
- StereoPannerNode(BaseAudioContext&);
-
Member<AudioParam> pan_;
};
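
The StereoPanner edits are again type and allocation changes; the equal-power pan curve the node implements is untouched. As background, the Web Audio spec derives the left/right gains for a mono source from pan in [-1, 1] roughly as in this standalone sketch (not Blink's StereoPanner class):

    #include <cmath>
    #include <utility>

    // Equal-power panning gains for a mono source: map pan from [-1, 1] to
    // x in [0, 1], then take cos/sin of x * pi/2.
    std::pair<float, float> EqualPowerGains(float pan) {
      const float kHalfPi = 1.57079632679f;
      float x = (pan + 1.f) * 0.5f;          // [-1, 1] -> [0, 1]
      return {std::cos(x * kHalfPi),         // left gain
              std::sin(x * kHalfPi)};        // right gain
    }

    int main() {
      auto [l, r] = EqualPowerGains(0.f);    // centre: both gains ~0.707
      return (l > 0.70f && r > 0.70f) ? 0 : 1;
    }
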
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc
index 0e48f88bef2..ee76705be11 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc
@@ -42,23 +42,23 @@ WaveShaperDSPKernel::WaveShaperDSPKernel(WaveShaperProcessor* processor)
void WaveShaperDSPKernel::LazyInitializeOversampling() {
if (!temp_buffer_) {
temp_buffer_ = std::make_unique<AudioFloatArray>(
- AudioUtilities::kRenderQuantumFrames * 2);
+ audio_utilities::kRenderQuantumFrames * 2);
temp_buffer2_ = std::make_unique<AudioFloatArray>(
- AudioUtilities::kRenderQuantumFrames * 4);
+ audio_utilities::kRenderQuantumFrames * 4);
up_sampler_ =
- std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames);
- down_sampler_ =
- std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 2);
+ std::make_unique<UpSampler>(audio_utilities::kRenderQuantumFrames);
+ down_sampler_ = std::make_unique<DownSampler>(
+ audio_utilities::kRenderQuantumFrames * 2);
up_sampler2_ =
- std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames * 2);
- down_sampler2_ =
- std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 4);
+ std::make_unique<UpSampler>(audio_utilities::kRenderQuantumFrames * 2);
+ down_sampler2_ = std::make_unique<DownSampler>(
+ audio_utilities::kRenderQuantumFrames * 4);
}
}
void WaveShaperDSPKernel::Process(const float* source,
float* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
switch (GetWaveShaperProcessor()->Oversample()) {
case WaveShaperProcessor::kOverSampleNone:
ProcessCurve(source, destination, frames_to_process);
@@ -110,7 +110,7 @@ double WaveShaperDSPKernel::WaveShaperCurveValue(float input,
void WaveShaperDSPKernel::ProcessCurve(const float* source,
float* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
DCHECK(source);
DCHECK(destination);
DCHECK(GetWaveShaperProcessor());
@@ -142,8 +142,8 @@ void WaveShaperDSPKernel::ProcessCurve(const float* source,
void WaveShaperDSPKernel::ProcessCurve2x(const float* source,
float* destination,
- size_t frames_to_process) {
- bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
+ uint32_t frames_to_process) {
+ bool is_safe = frames_to_process == audio_utilities::kRenderQuantumFrames;
DCHECK(is_safe);
if (!is_safe)
return;
@@ -160,8 +160,8 @@ void WaveShaperDSPKernel::ProcessCurve2x(const float* source,
void WaveShaperDSPKernel::ProcessCurve4x(const float* source,
float* destination,
- size_t frames_to_process) {
- bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
+ uint32_t frames_to_process) {
+ bool is_safe = frames_to_process == audio_utilities::kRenderQuantumFrames;
DCHECK(is_safe);
if (!is_safe)
return;
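
WaveShaperDSPKernel's curve application itself is unchanged here; the patch only retypes the frame counts and renames the render-quantum constant used to size the oversampling buffers. The per-sample work is essentially an interpolated table lookup over the [-1, 1] input range, roughly as in this standalone sketch (an approximation of the behaviour, not the exact WaveShaperCurveValue code):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Map an input sample in [-1, 1] onto the shaping curve and linearly
    // interpolate between the two nearest curve points; inputs outside the
    // range are clamped to the curve ends.
    float ApplyCurve(const std::vector<float>& curve, float input) {
      if (curve.empty())
        return input;                         // no curve behaves as a pass-through
      if (curve.size() == 1)
        return curve[0];                      // setCurve() requires >= 2 points; guard anyway
      float x = std::clamp(input, -1.f, 1.f);
      float virtual_index = 0.5f * (x + 1.f) * (curve.size() - 1);
      size_t index = static_cast<size_t>(virtual_index);
      size_t next = std::min(index + 1, curve.size() - 1);
      float t = virtual_index - index;
      return (1.f - t) * curve[index] + t * curve[next];
    }

    int main() {
      std::vector<float> curve = {-1.f, 0.f, 1.f};   // identity-like 3-point curve
      return ApplyCurve(curve, 0.5f) == 0.5f ? 0 : 1;
    }
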
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h
index 77f037b7f85..cea94736949 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h
@@ -47,7 +47,7 @@ class WaveShaperDSPKernel final : public AudioDSPKernel {
// AudioDSPKernel
void Process(const float* source,
float* dest,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
void Reset() override;
double TailTime() const override;
double LatencyTime() const override;
@@ -67,15 +67,17 @@ class WaveShaperDSPKernel final : public AudioDSPKernel {
protected:
// Apply the shaping curve.
- void ProcessCurve(const float* source, float* dest, size_t frames_to_process);
+ void ProcessCurve(const float* source,
+ float* dest,
+ uint32_t frames_to_process);
// Use up-sampling, process at the higher sample-rate, then down-sample.
void ProcessCurve2x(const float* source,
float* dest,
- size_t frames_to_process);
+ uint32_t frames_to_process);
void ProcessCurve4x(const float* source,
float* dest,
- size_t frames_to_process);
+ uint32_t frames_to_process);
WaveShaperProcessor* GetWaveShaperProcessor() {
return static_cast<WaveShaperProcessor*>(Processor());
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc
index b74b95d8f5e..d36b0951582 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc
@@ -61,11 +61,11 @@ WaveShaperNode* WaveShaperNode::Create(BaseAudioContext& context,
return nullptr;
}
- return new WaveShaperNode(context);
+ return MakeGarbageCollected<WaveShaperNode>(context);
}
WaveShaperNode* WaveShaperNode::Create(BaseAudioContext* context,
- const WaveShaperOptions& options,
+ const WaveShaperOptions* options,
ExceptionState& exception_state) {
WaveShaperNode* node = Create(*context, exception_state);
@@ -74,10 +74,10 @@ WaveShaperNode* WaveShaperNode::Create(BaseAudioContext* context,
node->HandleChannelOptions(options, exception_state);
- if (options.hasCurve())
- node->setCurve(options.curve(), exception_state);
+ if (options->hasCurve())
+ node->setCurve(options->curve(), exception_state);
- node->setOversample(options.oversample());
+ node->setOversample(options->oversample());
return node;
}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h
index 6f51f121dc9..db5a832540b 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h
@@ -52,9 +52,11 @@ class WaveShaperNode final : public AudioNode {
public:
static WaveShaperNode* Create(BaseAudioContext&, ExceptionState&);
static WaveShaperNode* Create(BaseAudioContext*,
- const WaveShaperOptions&,
+ const WaveShaperOptions*,
ExceptionState&);
+ explicit WaveShaperNode(BaseAudioContext&);
+
// setCurve() is called on the main thread.
void setCurve(NotShared<DOMFloat32Array>, ExceptionState&);
void setCurve(const Vector<float>&, ExceptionState&);
@@ -64,8 +66,6 @@ class WaveShaperNode final : public AudioNode {
String oversample() const;
private:
- explicit WaveShaperNode(BaseAudioContext&);
-
void SetCurveImpl(const float* curve_data,
unsigned curve_length,
ExceptionState&);
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc
index ec42e046c84..6c3e83b8f54 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc
@@ -30,7 +30,7 @@
namespace blink {
WaveShaperProcessor::WaveShaperProcessor(float sample_rate,
- size_t number_of_channels)
+ unsigned number_of_channels)
: AudioDSPKernelProcessor(sample_rate, number_of_channels),
oversample_(kOverSampleNone) {}
@@ -91,7 +91,7 @@ void WaveShaperProcessor::SetOversample(OverSampleType oversample) {
void WaveShaperProcessor::Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) {
+ uint32_t frames_to_process) {
if (!IsInitialized()) {
destination->Zero();
return;
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h
index 8c5ba1099af..b83906755fe 100644
--- a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h
@@ -43,7 +43,7 @@ class WaveShaperProcessor final : public AudioDSPKernelProcessor {
public:
enum OverSampleType { kOverSampleNone, kOverSample2x, kOverSample4x };
- WaveShaperProcessor(float sample_rate, size_t number_of_channels);
+ WaveShaperProcessor(float sample_rate, unsigned number_of_channels);
~WaveShaperProcessor() override;
@@ -51,7 +51,7 @@ class WaveShaperProcessor final : public AudioDSPKernelProcessor {
void Process(const AudioBus* source,
AudioBus* destination,
- size_t frames_to_process) override;
+ uint32_t frames_to_process) override;
void SetCurve(const float* curve_data, unsigned curve_length);
Vector<float>* Curve() const { return curve_.get(); };