summaryrefslogtreecommitdiff
path: root/chromium/third_party/blink/renderer/modules/webaudio
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-15 10:20:33 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-15 10:28:57 +0000
commitd17ea114e5ef69ad5d5d7413280a13e6428098aa (patch)
tree2c01a75df69f30d27b1432467cfe7c1467a498da /chromium/third_party/blink/renderer/modules/webaudio
parent8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec (diff)
downloadqtwebengine-chromium-d17ea114e5ef69ad5d5d7413280a13e6428098aa.tar.gz
BASELINE: Update Chromium to 67.0.3396.47
Change-Id: Idcb1341782e417561a2473eeecc82642dafda5b7 Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/third_party/blink/renderer/modules/webaudio')
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/BUILD.gn135
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/DEPS9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/OWNERS5
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc294
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h132
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/analyser_node.idl51
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/analyser_options.idl11
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc111
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.h88
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc131
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h82
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc157
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h75
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc69
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc345
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h135
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.idl44
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_options.idl10
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc772
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h227
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.idl46
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_options.idl13
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc267
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_context.h62
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_context.idl57
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_context_options.idl11
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc156
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc131
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h135
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.idl29
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc323
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h197
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_listener.idl48
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc1037
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node.h387
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node.idl55
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc228
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h117
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node_options.idl10
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc250
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h186
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc413
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param.h276
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param.idl56
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_descriptor.idl14
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.cc65
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h53
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc1991
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h488
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc78
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h76
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.idl34
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event_init.idl10
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc294
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h169
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.idl13
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.cc65
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.h97
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_timestamp.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.cc98
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.h74
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.idl10
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc410
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h141
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.idl15
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc399
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc106
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h79
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc383
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h129
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.idl17
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node_options.idl12
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc63
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h38
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc68
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h76
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.idl13
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc72
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h77
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h25
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.cc113
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h70
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc198
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc1038
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h531
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.idl74
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/base_audio_context_test.cc751
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc256
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h93
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc235
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h112
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.idl54
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_options.idl12
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc163
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h109
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc180
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h78
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.idl36
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_merger_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc186
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h76
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.idl33
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc160
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h65
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.idl14
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/constant_source_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc315
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h114
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/convolver_node.idl36
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/convolver_node_test.cc28
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/convolver_options.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h68
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc196
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h101
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc388
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h237
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc74
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h56
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc134
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_node.h75
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_node.idl34
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_options.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc58
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h60
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc297
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h119
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.idl39
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node_test.cc29
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_options.idl12
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc179
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/gain_node.h90
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/gain_node.idl35
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/gain_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc243
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h67
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.idl15
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_filter_options.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc89
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h52
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc62
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h46
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc321
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h157
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.idl35
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc191
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h96
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.idl34
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc201
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h115
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.idl34
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc70
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h68
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.idl32
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event_init.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc471
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h139
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.idl39
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context_options.idl10
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc418
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h178
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc558
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h138
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.idl49
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/oscillator_options.idl11
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc848
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/panner_node.h261
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/panner_node.idl70
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/panner_options.idl23
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc389
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h127
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.idl34
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_constraints.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_options.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc345
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h119
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc516
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h159
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.idl35
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/script_processor_node_test.cc31
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc190
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h71
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.idl15
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node_test.cc28
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_options.idl8
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.cc21
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.h23
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc219
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h87
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc173
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h77
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.idl41
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_options.idl9
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc109
-rw-r--r--chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h71
199 files changed, 28701 insertions, 0 deletions
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/BUILD.gn b/chromium/third_party/blink/renderer/modules/webaudio/BUILD.gn
new file mode 100644
index 00000000000..52ec0bf8837
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/BUILD.gn
@@ -0,0 +1,135 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//third_party/blink/renderer/modules/modules.gni")
+
+blink_modules_sources("webaudio") {
+ sources = [
+ "analyser_node.cc",
+ "analyser_node.h",
+ "async_audio_decoder.cc",
+ "async_audio_decoder.h",
+ "audio_basic_inspector_node.cc",
+ "audio_basic_inspector_node.h",
+ "audio_basic_processor_handler.cc",
+ "audio_basic_processor_handler.h",
+ "audio_buffer.cc",
+ "audio_buffer.h",
+ "audio_buffer_source_node.cc",
+ "audio_buffer_source_node.h",
+ "audio_context.cc",
+ "audio_context.h",
+ "audio_destination_node.cc",
+ "audio_destination_node.h",
+ "audio_listener.cc",
+ "audio_listener.h",
+ "audio_node.cc",
+ "audio_node.h",
+ "audio_node_input.cc",
+ "audio_node_input.h",
+ "audio_node_output.cc",
+ "audio_node_output.h",
+ "audio_param.cc",
+ "audio_param.h",
+ "audio_param_map.cc",
+ "audio_param_map.h",
+ "audio_param_timeline.cc",
+ "audio_param_timeline.h",
+ "audio_processing_event.cc",
+ "audio_processing_event.h",
+ "audio_scheduled_source_node.cc",
+ "audio_scheduled_source_node.h",
+ "audio_summing_junction.cc",
+ "audio_summing_junction.h",
+ "audio_worklet.cc",
+ "audio_worklet.h",
+ "audio_worklet_global_scope.cc",
+ "audio_worklet_global_scope.h",
+ "audio_worklet_messaging_proxy.cc",
+ "audio_worklet_messaging_proxy.h",
+ "audio_worklet_node.cc",
+ "audio_worklet_node.h",
+ "audio_worklet_object_proxy.cc",
+ "audio_worklet_object_proxy.h",
+ "audio_worklet_processor.cc",
+ "audio_worklet_processor.h",
+ "audio_worklet_processor_definition.cc",
+ "audio_worklet_processor_definition.h",
+ "audio_worklet_processor_error_state.h",
+ "audio_worklet_thread.cc",
+ "audio_worklet_thread.h",
+ "base_audio_context.cc",
+ "base_audio_context.h",
+ "biquad_dsp_kernel.cc",
+ "biquad_dsp_kernel.h",
+ "biquad_filter_node.cc",
+ "biquad_filter_node.h",
+ "biquad_processor.cc",
+ "biquad_processor.h",
+ "channel_merger_node.cc",
+ "channel_merger_node.h",
+ "channel_splitter_node.cc",
+ "channel_splitter_node.h",
+ "constant_source_node.cc",
+ "constant_source_node.h",
+ "convolver_node.cc",
+ "convolver_node.h",
+ "cross_thread_audio_worklet_processor_info.h",
+ "default_audio_destination_node.cc",
+ "default_audio_destination_node.h",
+ "deferred_task_handler.cc",
+ "deferred_task_handler.h",
+ "delay_dsp_kernel.cc",
+ "delay_dsp_kernel.h",
+ "delay_node.cc",
+ "delay_node.h",
+ "delay_processor.cc",
+ "delay_processor.h",
+ "dynamics_compressor_node.cc",
+ "dynamics_compressor_node.h",
+ "gain_node.cc",
+ "gain_node.h",
+ "iir_filter_node.cc",
+ "iir_filter_node.h",
+ "iir_processor.cc",
+ "iir_processor.h",
+ "iirdsp_kernel.cc",
+ "iirdsp_kernel.h",
+ "media_element_audio_source_node.cc",
+ "media_element_audio_source_node.h",
+ "media_stream_audio_destination_node.cc",
+ "media_stream_audio_destination_node.h",
+ "media_stream_audio_source_node.cc",
+ "media_stream_audio_source_node.h",
+ "offline_audio_completion_event.cc",
+ "offline_audio_completion_event.h",
+ "offline_audio_context.cc",
+ "offline_audio_context.h",
+ "offline_audio_destination_node.cc",
+ "offline_audio_destination_node.h",
+ "oscillator_node.cc",
+ "oscillator_node.h",
+ "panner_node.cc",
+ "panner_node.h",
+ "periodic_wave.cc",
+ "periodic_wave.h",
+ "realtime_analyser.cc",
+ "realtime_analyser.h",
+ "script_processor_node.cc",
+ "script_processor_node.h",
+ "stereo_panner_node.cc",
+ "stereo_panner_node.h",
+ "wave_shaper_dsp_kernel.cc",
+ "wave_shaper_dsp_kernel.h",
+ "wave_shaper_node.cc",
+ "wave_shaper_node.h",
+ "wave_shaper_processor.cc",
+ "wave_shaper_processor.h",
+ ]
+
+ if (is_win) {
+ # Result of 32-bit shift implicitly converted to 64 bits.
+ cflags = [ "/wd4334" ]
+ }
+}
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/DEPS b/chromium/third_party/blink/renderer/modules/webaudio/DEPS
new file mode 100644
index 00000000000..5011e3e1c62
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/DEPS
@@ -0,0 +1,9 @@
+include_rules = [
+ "-third_party/blink/renderer/modules",
+ "+third_party/blink/renderer/modules/event_modules.h",
+ "+third_party/blink/renderer/modules/event_target_modules.h",
+ "+third_party/blink/renderer/modules/modules_export.h",
+ "+third_party/blink/renderer/modules/mediastream",
+ "+third_party/blink/renderer/modules/webaudio",
+ "+third_party/blink/renderer/modules/worklet",
+]
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/OWNERS b/chromium/third_party/blink/renderer/modules/webaudio/OWNERS
new file mode 100644
index 00000000000..caaf05cf1d8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/OWNERS
@@ -0,0 +1,5 @@
+hongchan@chromium.org
+kbr@chromium.org
+rtoy@chromium.org
+
+# COMPONENT: Blink>WebAudio
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc
new file mode 100644
index 00000000000..d90c8434c24
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.cc
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/analyser_node.h"
+#include "third_party/blink/renderer/modules/webaudio/analyser_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+
+namespace blink {
+
+AnalyserHandler::AnalyserHandler(AudioNode& node, float sample_rate)
+ : AudioBasicInspectorHandler(kNodeTypeAnalyser, node, sample_rate, 1) {
+ channel_count_ = 1;
+ Initialize();
+}
+
+scoped_refptr<AnalyserHandler> AnalyserHandler::Create(AudioNode& node,
+ float sample_rate) {
+ return base::AdoptRef(new AnalyserHandler(node, sample_rate));
+}
+
+AnalyserHandler::~AnalyserHandler() {
+ Uninitialize();
+}
+
+void AnalyserHandler::Process(size_t frames_to_process) {
+ AudioBus* output_bus = Output(0).Bus();
+
+ if (!IsInitialized()) {
+ output_bus->Zero();
+ return;
+ }
+
+ AudioBus* input_bus = Input(0).Bus();
+
+ // Give the analyser the audio which is passing through this
+ // AudioNode. This must always be done so that the state of the
+ // Analyser reflects the current input.
+ analyser_.WriteInput(input_bus, frames_to_process);
+
+ if (!Input(0).IsConnected()) {
+ // No inputs, so clear the output, and propagate the silence hint.
+ output_bus->Zero();
+ return;
+ }
+
+ // For in-place processing, our override of pullInputs() will just pass the
+ // audio data through unchanged if the channel count matches from input to
+ // output (resulting in inputBus == outputBus). Otherwise, do an up-mix to
+ // stereo.
+ if (input_bus != output_bus)
+ output_bus->CopyFrom(*input_bus);
+}
+
+void AnalyserHandler::SetFftSize(unsigned size,
+ ExceptionState& exception_state) {
+ if (!analyser_.SetFftSize(size)) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ (size < RealtimeAnalyser::kMinFFTSize ||
+ size > RealtimeAnalyser::kMaxFFTSize)
+ ? ExceptionMessages::IndexOutsideRange(
+ "FFT size", size, RealtimeAnalyser::kMinFFTSize,
+ ExceptionMessages::kInclusiveBound,
+ RealtimeAnalyser::kMaxFFTSize,
+ ExceptionMessages::kInclusiveBound)
+ : ("The value provided (" + String::Number(size) +
+ ") is not a power of two."));
+ }
+}
+
+void AnalyserHandler::SetMinDecibels(double k,
+ ExceptionState& exception_state) {
+ if (k < MaxDecibels()) {
+ analyser_.SetMinDecibels(k);
+ } else {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexExceedsMaximumBound(
+ "minDecibels", k, MaxDecibels()));
+ }
+}
+
+void AnalyserHandler::SetMaxDecibels(double k,
+ ExceptionState& exception_state) {
+ if (k > MinDecibels()) {
+ analyser_.SetMaxDecibels(k);
+ } else {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexExceedsMinimumBound(
+ "maxDecibels", k, MinDecibels()));
+ }
+}
+
+void AnalyserHandler::SetMinMaxDecibels(double min_decibels,
+ double max_decibels,
+ ExceptionState& exception_state) {
+ if (min_decibels >= max_decibels) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "maxDecibels (" + String::Number(max_decibels) +
+ ") must be greater than or equal to minDecibels " +
+ "( " + String::Number(min_decibels) + ").");
+ return;
+ }
+ analyser_.SetMinDecibels(min_decibels);
+ analyser_.SetMaxDecibels(max_decibels);
+}
+
+void AnalyserHandler::SetSmoothingTimeConstant(
+ double k,
+ ExceptionState& exception_state) {
+ if (k >= 0 && k <= 1) {
+ analyser_.SetSmoothingTimeConstant(k);
+ } else {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "smoothing value", k, 0.0, ExceptionMessages::kInclusiveBound, 1.0,
+ ExceptionMessages::kInclusiveBound));
+ }
+}
+
+void AnalyserHandler::UpdatePullStatus() {
+ DCHECK(Context()->IsGraphOwner());
+
+ if (Output(0).IsConnected()) {
+ // When an AudioBasicInspectorNode is connected to a downstream node, it
+ // will get pulled by the downstream node, thus remove it from the context's
+ // automatic pull list.
+ if (need_automatic_pull_) {
+ Context()->GetDeferredTaskHandler().RemoveAutomaticPullNode(this);
+ need_automatic_pull_ = false;
+ }
+ } else {
+ unsigned number_of_input_connections =
+ Input(0).NumberOfRenderingConnections();
+ // When an AnalyserNode is not connected to any downstream node
+ // while still connected from upstream node(s), add it to the context's
+ // automatic pull list.
+ //
+ // But don't remove the AnalyserNode if there are no inputs
+ // connected to the node. The node needs to be pulled so that the
+ // internal state is updated with the correct input signal (of
+ // zeroes).
+ if (number_of_input_connections && !need_automatic_pull_) {
+ Context()->GetDeferredTaskHandler().AddAutomaticPullNode(this);
+ need_automatic_pull_ = true;
+ }
+ }
+}
+
+bool AnalyserHandler::RequiresTailProcessing() const {
+ // Tail time is always non-zero so tail processing is required.
+ return true;
+}
+
+double AnalyserHandler::TailTime() const {
+ return RealtimeAnalyser::kMaxFFTSize /
+ static_cast<double>(Context()->sampleRate());
+};
+// ----------------------------------------------------------------
+
+AnalyserNode::AnalyserNode(BaseAudioContext& context)
+ : AudioBasicInspectorNode(context) {
+ SetHandler(AnalyserHandler::Create(*this, context.sampleRate()));
+}
+
+AnalyserNode* AnalyserNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new AnalyserNode(context);
+}
+
+AnalyserNode* AnalyserNode::Create(BaseAudioContext* context,
+ const AnalyserOptions& options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ AnalyserNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->setFftSize(options.fftSize(), exception_state);
+ node->setSmoothingTimeConstant(options.smoothingTimeConstant(),
+ exception_state);
+
+ // minDecibels and maxDecibels have default values. Set both of the values
+ // at once.
+ node->SetMinMaxDecibels(options.minDecibels(), options.maxDecibels(),
+ exception_state);
+
+ return node;
+}
+
+AnalyserHandler& AnalyserNode::GetAnalyserHandler() const {
+ return static_cast<AnalyserHandler&>(Handler());
+}
+
+unsigned AnalyserNode::fftSize() const {
+ return GetAnalyserHandler().FftSize();
+}
+
+void AnalyserNode::setFftSize(unsigned size, ExceptionState& exception_state) {
+ return GetAnalyserHandler().SetFftSize(size, exception_state);
+}
+
+unsigned AnalyserNode::frequencyBinCount() const {
+ return GetAnalyserHandler().FrequencyBinCount();
+}
+
+void AnalyserNode::setMinDecibels(double min, ExceptionState& exception_state) {
+ GetAnalyserHandler().SetMinDecibels(min, exception_state);
+}
+
+double AnalyserNode::minDecibels() const {
+ return GetAnalyserHandler().MinDecibels();
+}
+
+void AnalyserNode::setMaxDecibels(double max, ExceptionState& exception_state) {
+ GetAnalyserHandler().SetMaxDecibels(max, exception_state);
+}
+
+void AnalyserNode::SetMinMaxDecibels(double min,
+ double max,
+ ExceptionState& exception_state) {
+ GetAnalyserHandler().SetMinMaxDecibels(min, max, exception_state);
+}
+
+double AnalyserNode::maxDecibels() const {
+ return GetAnalyserHandler().MaxDecibels();
+}
+
+void AnalyserNode::setSmoothingTimeConstant(double smoothing_time,
+ ExceptionState& exception_state) {
+ GetAnalyserHandler().SetSmoothingTimeConstant(smoothing_time,
+ exception_state);
+}
+
+double AnalyserNode::smoothingTimeConstant() const {
+ return GetAnalyserHandler().SmoothingTimeConstant();
+}
+
+void AnalyserNode::getFloatFrequencyData(NotShared<DOMFloat32Array> array) {
+ GetAnalyserHandler().GetFloatFrequencyData(array.View(),
+ context()->currentTime());
+}
+
+void AnalyserNode::getByteFrequencyData(NotShared<DOMUint8Array> array) {
+ GetAnalyserHandler().GetByteFrequencyData(array.View(),
+ context()->currentTime());
+}
+
+void AnalyserNode::getFloatTimeDomainData(NotShared<DOMFloat32Array> array) {
+ GetAnalyserHandler().GetFloatTimeDomainData(array.View());
+}
+
+void AnalyserNode::getByteTimeDomainData(NotShared<DOMUint8Array> array) {
+ GetAnalyserHandler().GetByteTimeDomainData(array.View());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h
new file mode 100644
index 00000000000..8d80200dbb7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ANALYSER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ANALYSER_NODE_H_
+
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h"
+#include "third_party/blink/renderer/modules/webaudio/realtime_analyser.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class AnalyserOptions;
+class ExceptionState;
+
+class AnalyserHandler final : public AudioBasicInspectorHandler {
+ public:
+ static scoped_refptr<AnalyserHandler> Create(AudioNode&, float sample_rate);
+ ~AnalyserHandler() override;
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+
+ unsigned FftSize() const { return analyser_.FftSize(); }
+ void SetFftSize(unsigned size, ExceptionState&);
+
+ unsigned FrequencyBinCount() const { return analyser_.FrequencyBinCount(); }
+
+ void SetMinDecibels(double k, ExceptionState&);
+ double MinDecibels() const { return analyser_.MinDecibels(); }
+
+ void SetMaxDecibels(double k, ExceptionState&);
+ double MaxDecibels() const { return analyser_.MaxDecibels(); }
+
+ void SetMinMaxDecibels(double min, double max, ExceptionState&);
+
+ void SetSmoothingTimeConstant(double k, ExceptionState&);
+ double SmoothingTimeConstant() const {
+ return analyser_.SmoothingTimeConstant();
+ }
+
+ void GetFloatFrequencyData(DOMFloat32Array* array, double current_time) {
+ analyser_.GetFloatFrequencyData(array, current_time);
+ }
+ void GetByteFrequencyData(DOMUint8Array* array, double current_time) {
+ analyser_.GetByteFrequencyData(array, current_time);
+ }
+ void GetFloatTimeDomainData(DOMFloat32Array* array) {
+ analyser_.GetFloatTimeDomainData(array);
+ }
+ void GetByteTimeDomainData(DOMUint8Array* array) {
+ analyser_.GetByteTimeDomainData(array);
+ }
+
+ // AnalyserNode needs special handling when updating the pull status
+ // because the node must get pulled even if there are no inputs or
+ // outputs so that the internal state is properly updated with the
+ // correct time data.
+ void UpdatePullStatus() override;
+
+ bool RequiresTailProcessing() const final;
+ double TailTime() const final;
+
+ private:
+ AnalyserHandler(AudioNode&, float sample_rate);
+ bool PropagatesSilence() const {
+    // An AnalyserNode does actually propagate silence, but to get the
+ // time and FFT data updated correctly, process() needs to be
+ // called even if all the inputs are silent.
+ return false;
+ }
+
+ RealtimeAnalyser analyser_;
+};
+
+class AnalyserNode final : public AudioBasicInspectorNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AnalyserNode* Create(BaseAudioContext&, ExceptionState&);
+ static AnalyserNode* Create(BaseAudioContext*,
+ const AnalyserOptions&,
+ ExceptionState&);
+
+ unsigned fftSize() const;
+ void setFftSize(unsigned size, ExceptionState&);
+ unsigned frequencyBinCount() const;
+ void setMinDecibels(double, ExceptionState&);
+ double minDecibels() const;
+ void setMaxDecibels(double, ExceptionState&);
+ double maxDecibels() const;
+ void setSmoothingTimeConstant(double, ExceptionState&);
+ double smoothingTimeConstant() const;
+ void getFloatFrequencyData(NotShared<DOMFloat32Array>);
+ void getByteFrequencyData(NotShared<DOMUint8Array>);
+ void getFloatTimeDomainData(NotShared<DOMFloat32Array>);
+ void getByteTimeDomainData(NotShared<DOMUint8Array>);
+
+ private:
+ AnalyserNode(BaseAudioContext&);
+ AnalyserHandler& GetAnalyserHandler() const;
+
+ void SetMinMaxDecibels(double min, double max, ExceptionState&);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ANALYSER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.idl
new file mode 100644
index 00000000000..7593662430a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_node.idl
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#analysernode
+[
+ Constructor(BaseAudioContext context, optional AnalyserOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface AnalyserNode : AudioNode {
+ [RaisesException=Setter] attribute unsigned long fftSize;
+ readonly attribute unsigned long frequencyBinCount;
+
+ // minDecibels / maxDecibels represent the range to scale the FFT analysis data for conversion to unsigned byte values.
+ [RaisesException=Setter] attribute double minDecibels;
+ [RaisesException=Setter] attribute double maxDecibels;
+
+ // A value from 0.0 -> 1.0 where 0.0 represents no time averaging with the last analysis frame.
+ [RaisesException=Setter] attribute double smoothingTimeConstant;
+
+ // Copies the current frequency data into the passed array.
+ // If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped.
+ void getFloatFrequencyData(Float32Array array);
+ void getByteFrequencyData(Uint8Array array);
+
+ // Real-time waveform data
+ void getFloatTimeDomainData(Float32Array array);
+ void getByteTimeDomainData(Uint8Array array);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/analyser_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/analyser_options.idl
new file mode 100644
index 00000000000..7ad55d37468
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/analyser_options.idl
@@ -0,0 +1,11 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-analyseroptions
+dictionary AnalyserOptions : AudioNodeOptions {
+ unsigned long fftSize = 2048;
+ double maxDecibels = -30;
+ double minDecibels = -100;
+ double smoothingTimeConstant = 0.8;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc
new file mode 100644
index 00000000000..bd1da12c37d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/async_audio_decoder.h"
+
+#include "base/location.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_array_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_file_reader.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/threading/background_task_runner.h"
+#include "third_party/blink/renderer/platform/web_task_runner.h"
+
+namespace blink {
+
+void AsyncAudioDecoder::DecodeAsync(
+ DOMArrayBuffer* audio_data,
+ float sample_rate,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>* success_callback,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>* error_callback,
+ ScriptPromiseResolver* resolver,
+ BaseAudioContext* context) {
+ DCHECK(IsMainThread());
+ DCHECK(audio_data);
+ if (!audio_data)
+ return;
+
+ BackgroundTaskRunner::PostOnBackgroundThread(
+ FROM_HERE,
+ CrossThreadBind(&AsyncAudioDecoder::DecodeOnBackgroundThread,
+ WrapCrossThreadPersistent(audio_data), sample_rate,
+ WrapCrossThreadPersistent(success_callback),
+ WrapCrossThreadPersistent(error_callback),
+ WrapCrossThreadPersistent(resolver),
+ WrapCrossThreadPersistent(context)));
+}
+
+void AsyncAudioDecoder::DecodeOnBackgroundThread(
+ DOMArrayBuffer* audio_data,
+ float sample_rate,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>* success_callback,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>* error_callback,
+ ScriptPromiseResolver* resolver,
+ BaseAudioContext* context) {
+ DCHECK(!IsMainThread());
+ scoped_refptr<AudioBus> bus = CreateBusFromInMemoryAudioFile(
+ audio_data->Data(), audio_data->ByteLength(), false, sample_rate);
+
+ // Decoding is finished, but we need to do the callbacks on the main thread.
+  // A reference to |*bus| is retained by base::OnceCallback and will be removed
+  // after NotifyComplete() is done.
+ //
+ // We also want to avoid notifying the main thread if AudioContext does not
+ // exist any more.
+ if (context) {
+ PostCrossThreadTask(
+ *Platform::Current()->MainThread()->GetTaskRunner(), FROM_HERE,
+ CrossThreadBind(&AsyncAudioDecoder::NotifyComplete,
+ WrapCrossThreadPersistent(audio_data),
+ WrapCrossThreadPersistent(success_callback),
+ WrapCrossThreadPersistent(error_callback),
+ WTF::RetainedRef(std::move(bus)),
+ WrapCrossThreadPersistent(resolver),
+ WrapCrossThreadPersistent(context)));
+ }
+}
+
+void AsyncAudioDecoder::NotifyComplete(
+ DOMArrayBuffer*,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>* success_callback,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>* error_callback,
+ AudioBus* audio_bus,
+ ScriptPromiseResolver* resolver,
+ BaseAudioContext* context) {
+ DCHECK(IsMainThread());
+
+ AudioBuffer* audio_buffer = AudioBuffer::CreateFromAudioBus(audio_bus);
+
+ // If the context is available, let the context finish the notification.
+ if (context) {
+ context->HandleDecodeAudioData(audio_buffer, resolver, success_callback,
+ error_callback);
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.h b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.h
new file mode 100644
index 00000000000..372849be732
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/async_audio_decoder.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ASYNC_AUDIO_DECODER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ASYNC_AUDIO_DECODER_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_decode_error_callback.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_decode_success_callback.h"
+
+namespace blink {
+
+class AudioBuffer;
+class AudioBus;
+class BaseAudioContext;
+class DOMArrayBuffer;
+class ScriptPromiseResolver;
+
+// AsyncAudioDecoder asynchronously decodes audio file data from a
+// DOMArrayBuffer in the background thread. Upon successful decoding, a
+// completion callback will be invoked with the decoded PCM data in an
+// AudioBuffer.
+
+class AsyncAudioDecoder {
+ DISALLOW_NEW();
+
+ public:
+ AsyncAudioDecoder() = default;
+ ~AsyncAudioDecoder() = default;
+
+  // Must be called on the main thread. |DecodeAsync| and callees must not
+  // modify any of the parameters except |audio_data|. They are used to
+ // associate this decoding instance with the caller to process the decoding
+ // appropriately when finished.
+ void DecodeAsync(DOMArrayBuffer* audio_data,
+ float sample_rate,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>*,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>*,
+ ScriptPromiseResolver*,
+ BaseAudioContext*);
+
+ private:
+ AudioBuffer* CreateAudioBufferFromAudioBus(AudioBus*);
+ static void DecodeOnBackgroundThread(
+ DOMArrayBuffer* audio_data,
+ float sample_rate,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>*,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>*,
+ ScriptPromiseResolver*,
+ BaseAudioContext*);
+ static void NotifyComplete(
+ DOMArrayBuffer* audio_data,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>*,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>*,
+ AudioBus*,
+ ScriptPromiseResolver*,
+ BaseAudioContext*);
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncAudioDecoder);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_ASYNC_AUDIO_DECODER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc
new file mode 100644
index 00000000000..d7f02ffd28c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2012, Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+
+namespace blink {
+
+AudioBasicInspectorHandler::AudioBasicInspectorHandler(
+ NodeType node_type,
+ AudioNode& node,
+ float sample_rate,
+ unsigned output_channel_count)
+ : AudioHandler(node_type, node, sample_rate), need_automatic_pull_(false) {
+ AddInput();
+ AddOutput(output_channel_count);
+}
+
+// We override pullInputs() as an optimization allowing this node to take
+// advantage of in-place processing, where the input is simply passed through
+// unprocessed to the output.
+// Note: this only applies if the input and output channel counts match.
+void AudioBasicInspectorHandler::PullInputs(size_t frames_to_process) {
+ // Render input stream - try to render directly into output bus for
+ // pass-through processing where process() doesn't need to do anything...
+ Input(0).Pull(Output(0).Bus(), frames_to_process);
+}
+
+AudioNode* AudioBasicInspectorNode::connect(AudioNode* destination,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ AudioNode::connect(destination, output_index, input_index, exception_state);
+ static_cast<AudioBasicInspectorHandler&>(Handler()).UpdatePullStatus();
+
+ return destination;
+}
+
+void AudioBasicInspectorNode::disconnect(unsigned output_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ AudioNode::disconnect(output_index, exception_state);
+ static_cast<AudioBasicInspectorHandler&>(Handler()).UpdatePullStatus();
+}
+
+void AudioBasicInspectorHandler::CheckNumberOfChannelsForInput(
+ AudioNodeInput* input) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ DCHECK_EQ(input, &this->Input(0));
+ if (input != &this->Input(0))
+ return;
+
+ unsigned number_of_channels = input->NumberOfChannels();
+
+ if (number_of_channels != Output(0).NumberOfChannels()) {
+ // This will propagate the channel count to any nodes connected further
+ // downstream in the graph.
+ Output(0).SetNumberOfChannels(number_of_channels);
+ }
+
+ AudioHandler::CheckNumberOfChannelsForInput(input);
+
+ UpdatePullStatus();
+}
+
+void AudioBasicInspectorHandler::UpdatePullStatus() {
+ DCHECK(Context()->IsGraphOwner());
+
+ if (Output(0).IsConnected()) {
+ // When an AudioBasicInspectorNode is connected to a downstream node, it
+ // will get pulled by the downstream node, thus remove it from the context's
+ // automatic pull list.
+ if (need_automatic_pull_) {
+ Context()->GetDeferredTaskHandler().RemoveAutomaticPullNode(this);
+ need_automatic_pull_ = false;
+ }
+ } else {
+ unsigned number_of_input_connections =
+ Input(0).NumberOfRenderingConnections();
+ if (number_of_input_connections && !need_automatic_pull_) {
+ // When an AudioBasicInspectorNode is not connected to any downstream node
+ // while still connected from upstream node(s), add it to the context's
+ // automatic pull list.
+ Context()->GetDeferredTaskHandler().AddAutomaticPullNode(this);
+ need_automatic_pull_ = true;
+ } else if (!number_of_input_connections && need_automatic_pull_) {
+ // The AudioBasicInspectorNode is connected to nothing and is
+ // not an AnalyserNode, remove it from the context's automatic
+      // pull list. AnalyserNodes need to be pulled even with no
+ // inputs so that the internal state gets updated to hold the
+ // right time and FFT data.
+ Context()->GetDeferredTaskHandler().RemoveAutomaticPullNode(this);
+ need_automatic_pull_ = false;
+ }
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h
new file mode 100644
index 00000000000..68af198b837
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012, Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_INSPECTOR_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_INSPECTOR_NODE_H_
+
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+
+// AudioBasicInspectorNode is an AudioNode with one input and one output where
+// the output might not necessarily connect to another node's input.
+// If the output is not connected to any other node, then the
+// AudioBasicInspectorNode's processIfNecessary() function will be called
+// automatically by BaseAudioContext before the end of each render quantum so
+// that it can inspect the audio stream.
+class AudioBasicInspectorHandler : public AudioHandler {
+ public:
+ AudioBasicInspectorHandler(NodeType,
+ AudioNode&,
+ float sample_rate,
+ unsigned output_channel_count);
+
+ // AudioHandler
+ void PullInputs(size_t frames_to_process) final;
+ void CheckNumberOfChannelsForInput(AudioNodeInput*) final;
+
+ // AudioNode
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+
+ virtual void UpdatePullStatus();
+
+ protected:
+ // When setting to true, AudioBasicInspectorHandler will be pulled
+  // automatically by BaseAudioContext before the end of each render quantum.
+ bool need_automatic_pull_;
+};
+
+class AudioBasicInspectorNode : public AudioNode {
+ protected:
+ explicit AudioBasicInspectorNode(BaseAudioContext& context)
+ : AudioNode(context) {}
+
+ private:
+ // TODO(tkent): Should AudioBasicInspectorNode override other variants of
+ // connect() and disconnect()?
+ AudioNode* connect(AudioNode*,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState&) final;
+ void disconnect(unsigned output_index, ExceptionState&) final;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_INSPECTOR_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc
new file mode 100644
index 00000000000..57cdc41521b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_processor.h"
+
+namespace blink {
+
+AudioBasicProcessorHandler::AudioBasicProcessorHandler(
+ NodeType node_type,
+ AudioNode& node,
+ float sample_rate,
+ std::unique_ptr<AudioProcessor> processor)
+ : AudioHandler(node_type, node, sample_rate),
+ processor_(std::move(processor)) {
+ AddInput();
+ AddOutput(1);
+}
+
+AudioBasicProcessorHandler::~AudioBasicProcessorHandler() {
+ // Safe to call the uninitialize() because it's final.
+ Uninitialize();
+}
+
+void AudioBasicProcessorHandler::Initialize() {
+ if (IsInitialized())
+ return;
+
+ DCHECK(Processor());
+ Processor()->Initialize();
+
+ AudioHandler::Initialize();
+}
+
+void AudioBasicProcessorHandler::Uninitialize() {
+ if (!IsInitialized())
+ return;
+
+ DCHECK(Processor());
+ Processor()->Uninitialize();
+
+ AudioHandler::Uninitialize();
+}
+
+void AudioBasicProcessorHandler::Process(size_t frames_to_process) {
+ AudioBus* destination_bus = Output(0).Bus();
+
+ if (!IsInitialized() || !Processor() ||
+ Processor()->NumberOfChannels() != NumberOfChannels()) {
+ destination_bus->Zero();
+ } else {
+ AudioBus* source_bus = Input(0).Bus();
+
+ // FIXME: if we take "tail time" into account, then we can avoid calling
+ // processor()->process() once the tail dies down.
+ if (!Input(0).IsConnected())
+ source_bus->Zero();
+
+ Processor()->Process(source_bus, destination_bus, frames_to_process);
+ }
+}
+
+void AudioBasicProcessorHandler::ProcessOnlyAudioParams(
+ size_t frames_to_process) {
+ if (!IsInitialized() || !Processor())
+ return;
+
+ Processor()->ProcessOnlyAudioParams(frames_to_process);
+}
+
+// Nice optimization in the very common case allowing for "in-place" processing
+void AudioBasicProcessorHandler::PullInputs(size_t frames_to_process) {
+ // Render input stream - suggest to the input to render directly into output
+ // bus for in-place processing in process() if possible.
+ Input(0).Pull(Output(0).Bus(), frames_to_process);
+}
+
+// As soon as we know the channel count of our input, we can lazily initialize.
+// Sometimes this may be called more than once with different channel counts, in
+// which case we must safely uninitialize and then re-initialize with the new
+// channel count.
+void AudioBasicProcessorHandler::CheckNumberOfChannelsForInput(
+ AudioNodeInput* input) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ DCHECK_EQ(input, &this->Input(0));
+ if (input != &this->Input(0))
+ return;
+
+ DCHECK(Processor());
+ if (!Processor())
+ return;
+
+ unsigned number_of_channels = input->NumberOfChannels();
+
+ if (IsInitialized() && number_of_channels != Output(0).NumberOfChannels()) {
+ // We're already initialized but the channel count has changed.
+ Uninitialize();
+ }
+
+ if (!IsInitialized()) {
+ // This will propagate the channel count to any nodes connected further down
+ // the chain...
+ Output(0).SetNumberOfChannels(number_of_channels);
+
+ // Re-initialize the processor with the new channel count.
+ Processor()->SetNumberOfChannels(number_of_channels);
+ Initialize();
+ }
+
+ AudioHandler::CheckNumberOfChannelsForInput(input);
+}
+
+unsigned AudioBasicProcessorHandler::NumberOfChannels() {
+ return Output(0).NumberOfChannels();
+}
+
+bool AudioBasicProcessorHandler::RequiresTailProcessing() const {
+ return processor_->RequiresTailProcessing();
+}
+
+double AudioBasicProcessorHandler::TailTime() const {
+ return processor_->TailTime();
+}
+
+double AudioBasicProcessorHandler::LatencyTime() const {
+ return processor_->LatencyTime();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h
new file mode 100644
index 00000000000..056e60f0ffb
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_PROCESSOR_HANDLER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_PROCESSOR_HANDLER_H_
+
+#include <memory>
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+
+namespace blink {
+
+class AudioNodeInput;
+class AudioProcessor;
+
+// AudioBasicProcessorHandler is an AudioHandler with one input and one output
+// where the input and output have the same number of channels.
+class MODULES_EXPORT AudioBasicProcessorHandler : public AudioHandler {
+ public:
+ ~AudioBasicProcessorHandler() override;
+
+ // AudioHandler
+ void Process(size_t frames_to_process) final;
+ void ProcessOnlyAudioParams(size_t frames_to_process) final;
+ void PullInputs(size_t frames_to_process) final;
+ void Initialize() final;
+ void Uninitialize() final;
+
+ // Called in the main thread when the number of channels for the input may
+ // have changed.
+ void CheckNumberOfChannelsForInput(AudioNodeInput*) final;
+
+ // Returns the number of channels for both the input and the output.
+ unsigned NumberOfChannels();
+ AudioProcessor* Processor() { return processor_.get(); }
+
+ protected:
+ AudioBasicProcessorHandler(NodeType,
+ AudioNode&,
+ float sample_rate,
+ std::unique_ptr<AudioProcessor>);
+ private:
+ bool RequiresTailProcessing() const final;
+ double TailTime() const final;
+ double LatencyTime() const final;
+
+ std::unique_ptr<AudioProcessor> processor_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BASIC_PROCESSOR_HANDLER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc
new file mode 100644
index 00000000000..bb908aed720
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_processor.h"
+
+namespace blink {
+
+class MockAudioProcessor final : public AudioProcessor {
+ public:
+ MockAudioProcessor() : AudioProcessor(48000, 2) {}
+ void Initialize() override { initialized_ = true; }
+ void Uninitialize() override { initialized_ = false; }
+ void Process(const AudioBus*, AudioBus*, size_t) override {}
+ void Reset() override {}
+ void SetNumberOfChannels(unsigned) override {}
+ unsigned NumberOfChannels() const override { return number_of_channels_; }
+ bool RequiresTailProcessing() const override { return true; }
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+};
+
+class MockProcessorHandler final : public AudioBasicProcessorHandler {
+ public:
+ static scoped_refptr<MockProcessorHandler> Create(AudioNode& node,
+ float sample_rate) {
+ return base::AdoptRef(new MockProcessorHandler(node, sample_rate));
+ }
+
+ private:
+ MockProcessorHandler(AudioNode& node, float sample_rate)
+ : AudioBasicProcessorHandler(AudioHandler::kNodeTypeWaveShaper,
+ node,
+ sample_rate,
+ std::make_unique<MockAudioProcessor>()) {
+ Initialize();
+ }
+};
+
+class MockProcessorNode final : public AudioNode {
+ public:
+ MockProcessorNode(BaseAudioContext& context) : AudioNode(context) {
+ SetHandler(MockProcessorHandler::Create(*this, 48000));
+ }
+};
+
+TEST(AudioBasicProcessorHandlerTest, ProcessorFinalization) {
+ std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
+ OfflineAudioContext* context = OfflineAudioContext::Create(
+ &page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
+ MockProcessorNode* node = new MockProcessorNode(*context);
+ AudioBasicProcessorHandler& handler =
+ static_cast<AudioBasicProcessorHandler&>(node->Handler());
+ EXPECT_TRUE(handler.Processor());
+ EXPECT_TRUE(handler.Processor()->IsInitialized());
+ BaseAudioContext::GraphAutoLocker locker(context);
+ handler.Dispose();
+ // The AudioProcessor should live after dispose() and should not be
+ // finalized because an audio thread is using it.
+ EXPECT_TRUE(handler.Processor());
+ EXPECT_TRUE(handler.Processor()->IsInitialized());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc
new file mode 100644
index 00000000000..71da68ca003
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.cc
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer_options.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_file_reader.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/typed_arrays/float32_array.h"
+
+namespace blink {
+
+AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate) {
+ if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
+ !number_of_channels || !number_of_frames)
+ return nullptr;
+
+ AudioBuffer* buffer =
+ new AudioBuffer(number_of_channels, number_of_frames, sample_rate);
+
+ if (!buffer->CreatedSuccessfully(number_of_channels))
+ return nullptr;
+ return buffer;
+}
+
+AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ ExceptionState& exception_state) {
+ if (!number_of_channels ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels()) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange(
+ "number of channels", number_of_channels, 1u,
+ ExceptionMessages::kInclusiveBound,
+ BaseAudioContext::MaxNumberOfChannels(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange(
+ "sample rate", sample_rate,
+ AudioUtilities::MinAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound,
+ AudioUtilities::MaxAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ if (!number_of_frames) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexExceedsMinimumBound(
+ "number of frames", number_of_frames, static_cast<size_t>(0)));
+ return nullptr;
+ }
+
+ AudioBuffer* audio_buffer =
+ Create(number_of_channels, number_of_frames, sample_rate);
+
+ if (!audio_buffer) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, "createBuffer(" +
+ String::Number(number_of_channels) + ", " +
+ String::Number(number_of_frames) + ", " +
+ String::Number(sample_rate) + ") failed.");
+ }
+
+ return audio_buffer;
+}
+
+AudioBuffer* AudioBuffer::Create(const AudioBufferOptions& options,
+ ExceptionState& exception_state) {
+ return Create(options.numberOfChannels(), options.length(),
+ options.sampleRate(), exception_state);
+}
+
+AudioBuffer* AudioBuffer::CreateUninitialized(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate) {
+ if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
+ !number_of_channels || !number_of_frames)
+ return nullptr;
+
+ AudioBuffer* buffer = new AudioBuffer(number_of_channels, number_of_frames,
+ sample_rate, kDontInitialize);
+
+ if (!buffer->CreatedSuccessfully(number_of_channels))
+ return nullptr;
+ return buffer;
+}
+
+AudioBuffer* AudioBuffer::CreateFromAudioFileData(const void* data,
+ size_t data_size,
+ bool mix_to_mono,
+ float sample_rate) {
+ scoped_refptr<AudioBus> bus =
+ CreateBusFromInMemoryAudioFile(data, data_size, mix_to_mono, sample_rate);
+ if (bus) {
+ AudioBuffer* buffer = new AudioBuffer(bus.get());
+ if (buffer->CreatedSuccessfully(bus->NumberOfChannels()))
+ return buffer;
+ }
+
+ return nullptr;
+}
+
+AudioBuffer* AudioBuffer::CreateFromAudioBus(AudioBus* bus) {
+ if (!bus)
+ return nullptr;
+ AudioBuffer* buffer = new AudioBuffer(bus);
+ if (buffer->CreatedSuccessfully(bus->NumberOfChannels()))
+ return buffer;
+ return nullptr;
+}
+
+bool AudioBuffer::CreatedSuccessfully(
+ unsigned desired_number_of_channels) const {
+ return numberOfChannels() == desired_number_of_channels;
+}
+
+DOMFloat32Array* AudioBuffer::CreateFloat32ArrayOrNull(
+ size_t length,
+ InitializationPolicy policy) {
+ scoped_refptr<WTF::Float32Array> buffer;
+
+ switch (policy) {
+ case kZeroInitialize:
+ buffer = WTF::Float32Array::CreateOrNull(length);
+ break;
+ case kDontInitialize:
+ buffer = WTF::Float32Array::CreateUninitializedOrNull(length);
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ if (!buffer) {
+ return nullptr;
+ }
+ return DOMFloat32Array::Create(std::move(buffer));
+}
+
+AudioBuffer::AudioBuffer(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ InitializationPolicy policy)
+ : sample_rate_(sample_rate), length_(number_of_frames) {
+ channels_.ReserveCapacity(number_of_channels);
+
+ for (unsigned i = 0; i < number_of_channels; ++i) {
+ DOMFloat32Array* channel_data_array =
+ CreateFloat32ArrayOrNull(length_, policy);
+ // If the channel data array could not be created, just return. The caller
+ // will need to check that the desired number of channels were created.
+ if (!channel_data_array)
+ return;
+
+ channel_data_array->SetNeuterable(false);
+ channels_.push_back(channel_data_array);
+ }
+}
+
+AudioBuffer::AudioBuffer(AudioBus* bus)
+ : sample_rate_(bus->SampleRate()), length_(bus->length()) {
+ // Copy audio data from the bus to the Float32Arrays we manage.
+ unsigned number_of_channels = bus->NumberOfChannels();
+ channels_.ReserveCapacity(number_of_channels);
+ for (unsigned i = 0; i < number_of_channels; ++i) {
+ DOMFloat32Array* channel_data_array =
+ CreateFloat32ArrayOrNull(length_, kDontInitialize);
+ // If the channel data array could not be created, just return. The caller
+ // will need to check that the desired number of channels were created.
+ if (!channel_data_array)
+ return;
+
+ channel_data_array->SetNeuterable(false);
+ const float* src = bus->Channel(i)->Data();
+ float* dst = channel_data_array->Data();
+ memmove(dst, src, length_ * sizeof(*dst));
+ channels_.push_back(channel_data_array);
+ }
+}
+
+NotShared<DOMFloat32Array> AudioBuffer::getChannelData(
+ unsigned channel_index,
+ ExceptionState& exception_state) {
+ if (channel_index >= channels_.size()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "channel index (" + String::Number(channel_index) +
+ ") exceeds number of channels (" +
+ String::Number(channels_.size()) + ")");
+ return NotShared<DOMFloat32Array>(nullptr);
+ }
+
+ return getChannelData(channel_index);
+}
+
+NotShared<DOMFloat32Array> AudioBuffer::getChannelData(unsigned channel_index) {
+ if (channel_index >= channels_.size())
+ return NotShared<DOMFloat32Array>(nullptr);
+
+ return NotShared<DOMFloat32Array>(channels_[channel_index].Get());
+}
+
+void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
+ long channel_number,
+ ExceptionState& exception_state) {
+ return copyFromChannel(destination, channel_number, 0, exception_state);
+}
+
+void AudioBuffer::copyFromChannel(NotShared<DOMFloat32Array> destination,
+ long channel_number,
+ unsigned long start_in_channel,
+ ExceptionState& exception_state) {
+ if (channel_number < 0 ||
+ channel_number >= static_cast<long>(channels_.size())) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange(
+ "channelNumber", channel_number, 0L,
+ ExceptionMessages::kInclusiveBound,
+ static_cast<long>(channels_.size() - 1),
+ ExceptionMessages::kInclusiveBound));
+
+ return;
+ }
+
+ DOMFloat32Array* channel_data = channels_[channel_number].Get();
+
+ if (start_in_channel >= channel_data->length()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange(
+ "startInChannel", start_in_channel, 0UL,
+ ExceptionMessages::kInclusiveBound,
+ static_cast<unsigned long>(channel_data->length()),
+ ExceptionMessages::kExclusiveBound));
+
+ return;
+ }
+
+ unsigned count = channel_data->length() - start_in_channel;
+ count = std::min(destination.View()->length(), count);
+
+ const float* src = channel_data->Data();
+ float* dst = destination.View()->Data();
+
+ DCHECK(src);
+ DCHECK(dst);
+
+ memcpy(dst, src + start_in_channel, count * sizeof(*src));
+}
+
+void AudioBuffer::copyToChannel(NotShared<DOMFloat32Array> source,
+ long channel_number,
+ ExceptionState& exception_state) {
+ return copyToChannel(source, channel_number, 0, exception_state);
+}
+
+void AudioBuffer::copyToChannel(NotShared<DOMFloat32Array> source,
+ long channel_number,
+ unsigned long start_in_channel,
+ ExceptionState& exception_state) {
+ if (channel_number < 0 ||
+ channel_number >= static_cast<long>(channels_.size())) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange(
+ "channelNumber", channel_number, 0L,
+ ExceptionMessages::kInclusiveBound,
+ static_cast<long>(channels_.size() - 1),
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ DOMFloat32Array* channel_data = channels_[channel_number].Get();
+
+ if (start_in_channel >= channel_data->length()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange(
+ "startInChannel", start_in_channel, 0UL,
+ ExceptionMessages::kInclusiveBound,
+ static_cast<unsigned long>(channel_data->length()),
+ ExceptionMessages::kExclusiveBound));
+
+ return;
+ }
+
+ unsigned count = channel_data->length() - start_in_channel;
+ count = std::min(source.View()->length(), count);
+
+ const float* src = source.View()->Data();
+ float* dst = channel_data->Data();
+
+ DCHECK(src);
+ DCHECK(dst);
+
+ memcpy(dst + start_in_channel, src, count * sizeof(*dst));
+}
+
+void AudioBuffer::Zero() {
+ for (unsigned i = 0; i < channels_.size(); ++i) {
+ if (NotShared<DOMFloat32Array> array = getChannelData(i)) {
+ float* data = array.View()->Data();
+ memset(data, 0, length() * sizeof(*data));
+ }
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h
new file mode 100644
index 00000000000..aafe25a269a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class AudioBus;
+class AudioBufferOptions;
+class ExceptionState;
+
+class MODULES_EXPORT AudioBuffer final : public ScriptWrappable {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioBuffer* Create(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate);
+ static AudioBuffer* Create(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ ExceptionState&);
+ static AudioBuffer* Create(const AudioBufferOptions&, ExceptionState&);
+
+ // Creates an AudioBuffer with uninitialized contents. This should
+ // only be used where we are guaranteed to initialize the contents
+  // with valid data and where JS cannot access it until initialization
+  // is done. |OfflineAudioContext::startRendering()| is one such
+ // place.
+ static AudioBuffer* CreateUninitialized(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate);
+
+  // Returns nullptr if |data| is not a valid audio file.
+ static AudioBuffer* CreateFromAudioFileData(const void* data,
+ size_t data_size,
+ bool mix_to_mono,
+ float sample_rate);
+
+ static AudioBuffer* CreateFromAudioBus(AudioBus*);
+
+ // Format
+ size_t length() const { return length_; }
+ double duration() const {
+ return length() / static_cast<double>(sampleRate());
+ }
+ float sampleRate() const { return sample_rate_; }
+
+ // Channel data access
+ unsigned numberOfChannels() const { return channels_.size(); }
+ NotShared<DOMFloat32Array> getChannelData(unsigned channel_index,
+ ExceptionState&);
+ NotShared<DOMFloat32Array> getChannelData(unsigned channel_index);
+ void copyFromChannel(NotShared<DOMFloat32Array>,
+ long channel_number,
+ ExceptionState&);
+ void copyFromChannel(NotShared<DOMFloat32Array>,
+ long channel_number,
+ unsigned long start_in_channel,
+ ExceptionState&);
+ void copyToChannel(NotShared<DOMFloat32Array>,
+ long channel_number,
+ ExceptionState&);
+ void copyToChannel(NotShared<DOMFloat32Array>,
+ long channel_number,
+ unsigned long start_in_channel,
+ ExceptionState&);
+
+ void Zero();
+
+ void Trace(blink::Visitor* visitor) {
+ visitor->Trace(channels_);
+ ScriptWrappable::Trace(visitor);
+ }
+
+ private:
+ // How to initialize the contents of an AudioBuffer. Default is to
+ // zero-initialize (|kZeroInitialize|). Otherwise, leave the array
+ // uninitialized (|kDontInitialize|).
+ enum InitializationPolicy { kZeroInitialize, kDontInitialize };
+
+ explicit AudioBuffer(AudioBus*);
+
+ static DOMFloat32Array* CreateFloat32ArrayOrNull(
+ size_t length,
+ InitializationPolicy allocation_policy = kZeroInitialize);
+
+ AudioBuffer(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ InitializationPolicy allocation_policy = kZeroInitialize);
+ bool CreatedSuccessfully(unsigned desired_number_of_channels) const;
+
+ float sample_rate_;
+ size_t length_;
+
+ HeapVector<Member<DOMFloat32Array>> channels_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.idl
new file mode 100644
index 00000000000..08e683ee254
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer.idl
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#AudioBuffer
+[
+ Constructor(AudioBufferOptions options),
+ RaisesException=Constructor,
+ Measure
+] interface AudioBuffer {
+ readonly attribute long length; // in sample-frames
+ readonly attribute double duration; // in seconds
+ readonly attribute float sampleRate; // in sample-frames per second
+
+ // Channel access
+ readonly attribute unsigned long numberOfChannels;
+ [RaisesException] Float32Array getChannelData(unsigned long channelIndex);
+ [RaisesException] void copyFromChannel(Float32Array destination, long channelNumber, optional unsigned long startInChannel = 0);
+ [RaisesException] void copyToChannel(Float32Array source, long channelNumber, optional unsigned long startInChannel = 0);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_options.idl
new file mode 100644
index 00000000000..a021652620a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_options.idl
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audiobufferoptions
+dictionary AudioBufferOptions {
+ unsigned long numberOfChannels = 1;
+ required unsigned long length;
+ required float sampleRate;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc
new file mode 100644
index 00000000000..15c0b19d681
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.cc
@@ -0,0 +1,772 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <algorithm>
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/frame/use_counter.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer_source_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+// Default grain duration (in seconds) used until start() or SetBuffer()
+// establishes a real duration.
+const double kDefaultGrainDuration = 0.020; // 20ms
+
+// Arbitrary upper limit on playback rate.
+// Higher than expected rates can be useful when playing back oversampled
+// buffers to minimize linear interpolation aliasing.
+const double kMaxRate = 1024;
+
+// Number of extra frames to use when determining if a source node can be
+// stopped. This should be at least one rendering quantum, but we add one more
+// quantum for good measure. This doesn't need to be extra precise, just more
+// than one rendering quantum. See |handleStoppableSourceNode()|.
+// FIXME: Expose the rendering quantum somehow instead of hardwiring a value
+// here.
+const int kExtraStopFrames = 256;
+
+// Constructs the handler in its default state: no buffer, not looping, not a
+// grain, unity playback rate. SetBuffer() later reconfigures channel count.
+AudioBufferSourceHandler::AudioBufferSourceHandler(
+    AudioNode& node,
+    float sample_rate,
+    AudioParamHandler& playback_rate,
+    AudioParamHandler& detune)
+    : AudioScheduledSourceHandler(kNodeTypeAudioBufferSource,
+                                  node,
+                                  sample_rate),
+      buffer_(nullptr),
+      playback_rate_(&playback_rate),
+      detune_(&detune),
+      is_looping_(false),
+      did_set_looping_(false),
+      loop_start_(0),
+      loop_end_(0),
+      virtual_read_index_(0),
+      is_grain_(false),
+      grain_offset_(0.0),
+      grain_duration_(kDefaultGrainDuration),
+      min_playback_rate_(1.0),
+      buffer_has_been_set_(false) {
+  // Default to mono. A call to setBuffer() will set the number of output
+  // channels to that of the buffer.
+  AddOutput(1);
+
+  Initialize();
+}
+
+// Factory. The handler is ref-counted so it can outlive the JS-visible node
+// while the rendering graph still references it.
+scoped_refptr<AudioBufferSourceHandler> AudioBufferSourceHandler::Create(
+    AudioNode& node,
+    float sample_rate,
+    AudioParamHandler& playback_rate,
+    AudioParamHandler& detune) {
+  return base::AdoptRef(
+      new AudioBufferSourceHandler(node, sample_rate, playback_rate, detune));
+}
+
+AudioBufferSourceHandler::~AudioBufferSourceHandler() {
+  // Detach from the rendering graph before members are destroyed.
+  Uninitialize();
+}
+
+// Audio-thread rendering entry point: fills this node's single output bus
+// with up to |frames_to_process| frames read from |buffer_|, or silence when
+// the node isn't ready (uninitialized, no buffer, channel-count mismatch, or
+// the process lock is contended).
+void AudioBufferSourceHandler::Process(size_t frames_to_process) {
+  AudioBus* output_bus = Output(0).Bus();
+
+  if (!IsInitialized()) {
+    output_bus->Zero();
+    return;
+  }
+
+  // The audio thread can't block on this lock, so we call tryLock() instead.
+  MutexTryLocker try_locker(process_lock_);
+  if (try_locker.Locked()) {
+    if (!Buffer()) {
+      output_bus->Zero();
+      return;
+    }
+
+    // After calling setBuffer() with a buffer having a different number of
+    // channels, there can in rare cases be a slight delay before the output bus
+    // is updated to the new number of channels because of use of tryLocks() in
+    // the context's updating system. In this case, if the buffer has just
+    // been changed and we're not quite ready yet, then just output silence.
+    if (NumberOfChannels() != Buffer()->numberOfChannels()) {
+      output_bus->Zero();
+      return;
+    }
+
+    size_t quantum_frame_offset;
+    size_t buffer_frames_to_process;
+    double start_time_offset;
+
+    // Determine which part of this rendering quantum the source is active in.
+    UpdateSchedulingInfo(frames_to_process, output_bus, quantum_frame_offset,
+                         buffer_frames_to_process, start_time_offset);
+
+    if (!buffer_frames_to_process) {
+      output_bus->Zero();
+      return;
+    }
+
+    // Cache raw destination pointers for the render loop.
+    for (unsigned i = 0; i < output_bus->NumberOfChannels(); ++i)
+      destination_channels_[i] = output_bus->Channel(i)->MutableData();
+
+    // Render by reading directly from the buffer.
+    if (!RenderFromBuffer(output_bus, quantum_frame_offset,
+                          buffer_frames_to_process)) {
+      output_bus->Zero();
+      return;
+    }
+
+    output_bus->ClearSilentFlag();
+  } else {
+    // Too bad - the tryLock() failed. We must be in the middle of changing
+    // buffers and were already outputting silence anyway.
+    output_bus->Zero();
+  }
+}
+
+// Returns true if we're finished.
+bool AudioBufferSourceHandler::RenderSilenceAndFinishIfNotLooping(
+ AudioBus*,
+ unsigned index,
+ size_t frames_to_process) {
+ if (!Loop()) {
+ // If we're not looping, then stop playing when we get to the end.
+
+ if (frames_to_process > 0) {
+ // We're not looping and we've reached the end of the sample data, but we
+ // still need to provide more output, so generate silence for the
+ // remaining.
+ for (unsigned i = 0; i < NumberOfChannels(); ++i)
+ memset(destination_channels_[i] + index, 0,
+ sizeof(float) * frames_to_process);
+ }
+
+ Finish();
+ return true;
+ }
+ return false;
+}
+
+// Reads |number_of_frames| frames from |buffer_| into the cached destination
+// channel pointers, starting at |destination_frame_offset|, honoring looping,
+// grain limits, and the computed playback rate (linear interpolation when the
+// rate is not exactly 1). Returns true on success; on false the caller zeroes
+// the whole output bus.
+bool AudioBufferSourceHandler::RenderFromBuffer(
+    AudioBus* bus,
+    unsigned destination_frame_offset,
+    size_t number_of_frames) {
+  DCHECK(Context()->IsAudioThread());
+
+  // Basic sanity checking
+  DCHECK(bus);
+  DCHECK(Buffer());
+  if (!bus || !Buffer())
+    return false;
+
+  unsigned number_of_channels = this->NumberOfChannels();
+  unsigned bus_number_of_channels = bus->NumberOfChannels();
+
+  bool channel_count_good =
+      number_of_channels && number_of_channels == bus_number_of_channels;
+  DCHECK(channel_count_good);
+  if (!channel_count_good)
+    return false;
+
+  // Sanity check destinationFrameOffset, numberOfFrames.
+  size_t destination_length = bus->length();
+
+  bool is_length_good =
+      destination_length <= AudioUtilities::kRenderQuantumFrames &&
+      number_of_frames <= AudioUtilities::kRenderQuantumFrames;
+  DCHECK(is_length_good);
+  if (!is_length_good)
+    return false;
+
+  bool is_offset_good =
+      destination_frame_offset <= destination_length &&
+      destination_frame_offset + number_of_frames <= destination_length;
+  DCHECK(is_offset_good);
+  if (!is_offset_good)
+    return false;
+
+  // Potentially zero out initial frames leading up to the offset.
+  if (destination_frame_offset) {
+    for (unsigned i = 0; i < number_of_channels; ++i)
+      memset(destination_channels_[i], 0,
+             sizeof(float) * destination_frame_offset);
+  }
+
+  // Offset the pointers to the correct offset frame.
+  unsigned write_index = destination_frame_offset;
+
+  size_t buffer_length = Buffer()->length();
+  double buffer_sample_rate = Buffer()->sampleRate();
+
+  // Avoid converting from time to sample-frames twice by computing
+  // the grain end time first before computing the sample frame.
+  unsigned end_frame =
+      is_grain_ ? AudioUtilities::TimeToSampleFrame(
+                      grain_offset_ + grain_duration_, buffer_sample_rate)
+                : buffer_length;
+
+  // Do some sanity checking.
+  if (end_frame > buffer_length)
+    end_frame = buffer_length;
+
+  // If the .loop attribute is true, then values of
+  // loop_start_ == 0 && loop_end_ == 0 implies that we should use the entire
+  // buffer as the loop, otherwise use the loop values in loop_start_ and
+  // loop_end_.
+  double virtual_end_frame = end_frame;
+  double virtual_delta_frames = end_frame;
+
+  if (Loop() && (loop_start_ || loop_end_) && loop_start_ >= 0 &&
+      loop_end_ > 0 && loop_start_ < loop_end_) {
+    // Convert from seconds to sample-frames.
+    double loop_start_frame = loop_start_ * Buffer()->sampleRate();
+    double loop_end_frame = loop_end_ * Buffer()->sampleRate();
+
+    virtual_end_frame = std::min(loop_end_frame, virtual_end_frame);
+    virtual_delta_frames = virtual_end_frame - loop_start_frame;
+  }
+
+  // If we're looping and the offset (virtualReadIndex) is past the end of the
+  // loop, wrap back to the beginning of the loop. For other cases, nothing
+  // needs to be done.
+  if (Loop() && virtual_read_index_ >= virtual_end_frame) {
+    virtual_read_index_ =
+        (loop_start_ < 0) ? 0 : (loop_start_ * Buffer()->sampleRate());
+    virtual_read_index_ =
+        std::min(virtual_read_index_, static_cast<double>(buffer_length - 1));
+  }
+
+  double computed_playback_rate = ComputePlaybackRate();
+
+  // Sanity check that our playback rate isn't larger than the loop size.
+  if (computed_playback_rate > virtual_delta_frames)
+    return false;
+
+  // Get local copy.
+  double virtual_read_index = virtual_read_index_;
+
+  // Render loop - reading from the source buffer to the destination using
+  // linear interpolation.
+  int frames_to_process = number_of_frames;
+
+  const float** source_channels = source_channels_.get();
+  float** destination_channels = destination_channels_.get();
+
+  DCHECK_GE(virtual_read_index, 0);
+  DCHECK_GE(virtual_delta_frames, 0);
+  DCHECK_GE(virtual_end_frame, 0);
+
+  // Optimize for the very common case of playing back with
+  // computedPlaybackRate == 1. We can avoid the linear interpolation.
+  if (computed_playback_rate == 1 &&
+      virtual_read_index == floor(virtual_read_index) &&
+      virtual_delta_frames == floor(virtual_delta_frames) &&
+      virtual_end_frame == floor(virtual_end_frame)) {
+    unsigned read_index = static_cast<unsigned>(virtual_read_index);
+    unsigned delta_frames = static_cast<unsigned>(virtual_delta_frames);
+    end_frame = static_cast<unsigned>(virtual_end_frame);
+    while (frames_to_process > 0) {
+      int frames_to_end = end_frame - read_index;
+      int frames_this_time = std::min(frames_to_process, frames_to_end);
+      frames_this_time = std::max(0, frames_this_time);
+
+      DCHECK_LE(write_index + frames_this_time, destination_length);
+      DCHECK_LE(read_index + frames_this_time, buffer_length);
+
+      // Bulk copy of whole frames: no interpolation needed at rate == 1.
+      for (unsigned i = 0; i < number_of_channels; ++i)
+        memcpy(destination_channels[i] + write_index,
+               source_channels[i] + read_index,
+               sizeof(float) * frames_this_time);
+
+      write_index += frames_this_time;
+      read_index += frames_this_time;
+      frames_to_process -= frames_this_time;
+
+      // It can happen that framesThisTime is 0. DCHECK that we will actually
+      // exit the loop in this case. framesThisTime is 0 only if
+      // readIndex >= endFrame;
+      DCHECK(frames_this_time ? true : read_index >= end_frame);
+
+      // Wrap-around.
+      if (read_index >= end_frame) {
+        read_index -= delta_frames;
+        if (RenderSilenceAndFinishIfNotLooping(bus, write_index,
+                                               frames_to_process))
+          break;
+      }
+    }
+    virtual_read_index = read_index;
+  } else {
+    // General case: per-frame linear interpolation at an arbitrary rate.
+    while (frames_to_process--) {
+      unsigned read_index = static_cast<unsigned>(virtual_read_index);
+      double interpolation_factor = virtual_read_index - read_index;
+
+      // For linear interpolation we need the next sample-frame too.
+      unsigned read_index2 = read_index + 1;
+      if (read_index2 >= buffer_length) {
+        if (Loop()) {
+          // Make sure to wrap around at the end of the buffer.
+          read_index2 = static_cast<unsigned>(virtual_read_index + 1 -
+                                              virtual_delta_frames);
+        } else {
+          read_index2 = read_index;
+        }
+      }
+
+      // Final sanity check on buffer access.
+      // FIXME: as an optimization, try to get rid of this inner-loop check and
+      // put assertions and guards before the loop.
+      if (read_index >= buffer_length || read_index2 >= buffer_length)
+        break;
+
+      // Linear interpolation.
+      for (unsigned i = 0; i < number_of_channels; ++i) {
+        float* destination = destination_channels[i];
+        const float* source = source_channels[i];
+
+        double sample1 = source[read_index];
+        double sample2 = source[read_index2];
+        double sample = (1.0 - interpolation_factor) * sample1 +
+                        interpolation_factor * sample2;
+
+        destination[write_index] = clampTo<float>(sample);
+      }
+      write_index++;
+
+      virtual_read_index += computed_playback_rate;
+
+      // Wrap-around, retaining sub-sample position since virtualReadIndex is
+      // floating-point.
+      if (virtual_read_index >= virtual_end_frame) {
+        virtual_read_index -= virtual_delta_frames;
+        if (RenderSilenceAndFinishIfNotLooping(bus, write_index,
+                                               frames_to_process))
+          break;
+      }
+    }
+  }
+
+  bus->ClearSilentFlag();
+
+  // Persist the playback position for the next rendering quantum.
+  virtual_read_index_ = virtual_read_index;
+
+  return true;
+}
+
+// Installs |buffer| as the sample data for this source. Main thread only;
+// synchronizes with Process() via |process_lock_|. Per the node's contract, a
+// non-null buffer may only be set once; throws InvalidStateError otherwise.
+void AudioBufferSourceHandler::SetBuffer(AudioBuffer* buffer,
+                                         ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (buffer && buffer_has_been_set_) {
+    exception_state.ThrowDOMException(kInvalidStateError,
+                                      "Cannot set buffer to non-null after it "
+                                      "has already been set to a non-null "
+                                      "buffer");
+    return;
+  }
+
+  // The context must be locked since changing the buffer can re-configure the
+  // number of channels that are output.
+  BaseAudioContext::GraphAutoLocker context_locker(Context());
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+
+  if (buffer) {
+    buffer_has_been_set_ = true;
+
+    // Do any necessary re-configuration to the buffer's number of channels.
+    unsigned number_of_channels = buffer->numberOfChannels();
+
+    // This should not be possible since AudioBuffers can't be created with too
+    // many channels either.
+    if (number_of_channels > BaseAudioContext::MaxNumberOfChannels()) {
+      exception_state.ThrowDOMException(
+          kNotSupportedError,
+          ExceptionMessages::IndexOutsideRange(
+              "number of input channels", number_of_channels, 1u,
+              ExceptionMessages::kInclusiveBound,
+              BaseAudioContext::MaxNumberOfChannels(),
+              ExceptionMessages::kInclusiveBound));
+      return;
+    }
+
+    Output(0).SetNumberOfChannels(number_of_channels);
+
+    // (Re)allocate the per-channel pointer arrays used by the render loop.
+    source_channels_ = std::make_unique<const float* []>(number_of_channels);
+    destination_channels_ = std::make_unique<float* []>(number_of_channels);
+
+    for (unsigned i = 0; i < number_of_channels; ++i)
+      source_channels_[i] = buffer->getChannelData(i).View()->Data();
+
+    // If this is a grain (as set by a previous call to start()), validate the
+    // grain parameters now since it wasn't validated when start was called
+    // (because there was no buffer then).
+    if (is_grain_)
+      ClampGrainParameters(buffer);
+  }
+
+  // Reset the playback position; |buffer| may be null, which clears the
+  // source data.
+  virtual_read_index_ = 0;
+  buffer_ = buffer;
+}
+
+// Current output channel count; tracks the installed buffer's channel count
+// (see SetBuffer(), which calls Output(0).SetNumberOfChannels()).
+unsigned AudioBufferSourceHandler::NumberOfChannels() {
+  return Output(0).NumberOfChannels();
+}
+
+// Clamps |grain_offset_| and |grain_duration_| to lie within |buffer| and
+// positions |virtual_read_index_| at the grain's start frame.
+void AudioBufferSourceHandler::ClampGrainParameters(const AudioBuffer* buffer) {
+  DCHECK(buffer);
+
+  // We have a buffer so we can clip the offset and duration to lie within the
+  // buffer.
+  double buffer_duration = buffer->duration();
+
+  grain_offset_ = clampTo(grain_offset_, 0.0, buffer_duration);
+
+  // If the duration was not explicitly given, use the buffer duration to set
+  // the grain duration. Otherwise, we want to use the user-specified value, of
+  // course.
+  if (!is_duration_given_)
+    grain_duration_ = buffer_duration - grain_offset_;
+
+  if (is_duration_given_ && Loop()) {
+    // We're looping a grain with a grain duration specified. Schedule the loop
+    // to stop after grainDuration seconds after starting, possibly running the
+    // loop multiple times if grainDuration is larger than the buffer duration.
+    // The net effect is as if the user called stop(when + grainDuration).
+    // The clamp below only enforces non-negativity.
+    grain_duration_ =
+        clampTo(grain_duration_, 0.0, std::numeric_limits<double>::infinity());
+    end_time_ = start_time_ + grain_duration_;
+  } else {
+    grain_duration_ =
+        clampTo(grain_duration_, 0.0, buffer_duration - grain_offset_);
+  }
+
+  // We call timeToSampleFrame here since at playbackRate == 1 we don't want to
+  // go through linear interpolation at a sub-sample position since it will
+  // degrade the quality. When aligned to the sample-frame the playback will be
+  // identical to the PCM data stored in the buffer. Since playbackRate == 1 is
+  // very common, it's worth considering quality.
+  virtual_read_index_ =
+      AudioUtilities::TimeToSampleFrame(grain_offset_, buffer->sampleRate());
+}
+
+// start(when): play the entire buffer from the beginning.
+void AudioBufferSourceHandler::Start(double when,
+                                     ExceptionState& exception_state) {
+  AudioScheduledSourceHandler::Start(when, exception_state);
+}
+
+// start(when, offset): play from |grain_offset|. The duration defaults to the
+// buffer's duration (0 when no buffer is set yet; SetBuffer() re-clamps the
+// grain parameters once a buffer arrives).
+void AudioBufferSourceHandler::Start(double when,
+                                     double grain_offset,
+                                     ExceptionState& exception_state) {
+  StartSource(when, grain_offset, Buffer() ? Buffer()->duration() : 0, false,
+              exception_state);
+}
+
+// start(when, offset, duration): play a grain with an explicit duration.
+void AudioBufferSourceHandler::Start(double when,
+                                     double grain_offset,
+                                     double grain_duration,
+                                     ExceptionState& exception_state) {
+  StartSource(when, grain_offset, grain_duration, true, exception_state);
+}
+
+// Shared implementation behind the start() overloads: validates the
+// arguments, records the grain state, and schedules playback. Main thread
+// only. Throws InvalidStateError on a second start() and RangeError for
+// negative arguments.
+void AudioBufferSourceHandler::StartSource(double when,
+                                           double grain_offset,
+                                           double grain_duration,
+                                           bool is_duration_given,
+                                           ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  Context()->MaybeRecordStartAttempt();
+
+  // start() may be called at most once per source node.
+  if (GetPlaybackState() != UNSCHEDULED_STATE) {
+    exception_state.ThrowDOMException(kInvalidStateError,
+                                      "cannot call start more than once.");
+    return;
+  }
+
+  if (when < 0) {
+    exception_state.ThrowRangeError(
+        ExceptionMessages::IndexExceedsMinimumBound("start time", when, 0.0));
+    return;
+  }
+
+  if (grain_offset < 0) {
+    exception_state.ThrowRangeError(ExceptionMessages::IndexExceedsMinimumBound(
+        "offset", grain_offset, 0.0));
+    return;
+  }
+
+  if (grain_duration < 0) {
+    exception_state.ThrowRangeError(ExceptionMessages::IndexExceedsMinimumBound(
+        "duration", grain_duration, 0.0));
+    return;
+  }
+
+  // The node is started. Add a reference to keep us alive so that audio
+  // will eventually get played even if Javascript should drop all references
+  // to this node. The reference will get dropped when the source has finished
+  // playing.
+  Context()->NotifySourceNodeStartedProcessing(GetNode());
+
+  // This synchronizes with process(). updateSchedulingInfo will read some of
+  // the variables being set here.
+  MutexLocker process_locker(process_lock_);
+
+  is_duration_given_ = is_duration_given;
+  is_grain_ = true;
+  grain_offset_ = grain_offset;
+  grain_duration_ = grain_duration;
+
+  // If |when| < currentTime, the source must start now according to the spec.
+  // So just set startTime to currentTime in this case to start the source now.
+  start_time_ = std::max(when, Context()->currentTime());
+
+  // With no buffer yet, SetBuffer() performs this clamp later (is_grain_ is
+  // now true, which triggers it there).
+  if (Buffer())
+    ClampGrainParameters(Buffer());
+
+  SetPlaybackState(SCHEDULED_STATE);
+}
+
+// Computes the effective playback rate for this quantum:
+//   (buffer sampleRate / context sampleRate) * playbackRate * 2^(detune/1200)
+// clamped to [0, kMaxRate]; also records the minimum rate ever observed for
+// HandleStoppableSourceNode().
+double AudioBufferSourceHandler::ComputePlaybackRate() {
+  // Incorporate buffer's sample-rate versus BaseAudioContext's sample-rate.
+  // Normally it's not an issue because buffers are loaded at the
+  // BaseAudioContext's sample-rate, but we can handle it in any case.
+  double sample_rate_factor = 1.0;
+  if (Buffer()) {
+    // Use doubles to compute this to full accuracy.
+    sample_rate_factor =
+        Buffer()->sampleRate() / static_cast<double>(Context()->sampleRate());
+  }
+
+  // Use finalValue() to incorporate changes of AudioParamTimeline and
+  // AudioSummingJunction from the playbackRate AudioParam.
+  double base_playback_rate = playback_rate_->FinalValue();
+
+  double final_playback_rate = sample_rate_factor * base_playback_rate;
+
+  // Take the detune value (in cents) into account for the final playback rate.
+  final_playback_rate *= pow(2, detune_->FinalValue() / 1200);
+
+  // Sanity check the total rate. It's very important that the resampler not
+  // get any bad rate values.
+  final_playback_rate = clampTo(final_playback_rate, 0.0, kMaxRate);
+
+  bool is_playback_rate_valid =
+      !std::isnan(final_playback_rate) && !std::isinf(final_playback_rate);
+  DCHECK(is_playback_rate_valid);
+
+  if (!is_playback_rate_valid)
+    final_playback_rate = 1.0;
+
+  // Record the minimum playback rate for use by HandleStoppableSourceNode.
+  if (final_playback_rate < min_playback_rate_) {
+    MutexLocker locker(min_playback_rate_mutex_);
+    min_playback_rate_ = final_playback_rate;
+  }
+
+  return final_playback_rate;
+}
+
+// Main-thread accessor for the minimum playback rate seen so far; guarded by
+// |min_playback_rate_mutex_| because ComputePlaybackRate() updates the value
+// from the audio thread.
+double AudioBufferSourceHandler::GetMinPlaybackRate() {
+  DCHECK(IsMainThread());
+  MutexLocker locker(min_playback_rate_mutex_);
+  return min_playback_rate_;
+}
+
+// The node is known-silent when it hasn't been started, has finished, or has
+// no buffer installed.
+bool AudioBufferSourceHandler::PropagatesSilence() const {
+  return !IsPlayingOrScheduled() || HasFinished() || !buffer_;
+}
+
+void AudioBufferSourceHandler::HandleStoppableSourceNode() {
+  // If the source node is not looping, and we have a buffer, we can determine
+  // when the source would stop playing. This is intended to handle the
+  // (uncommon) scenario where start() has been called but is never connected to
+  // the destination (directly or indirectly). By stopping the node, the node
+  // can be collected. Otherwise, the node will never get collected, leaking
+  // memory.
+  //
+  // If looping was ever done (did_set_looping_ == true), give up. We can't
+  // easily determine how long we looped so we don't know the actual duration
+  // thus far, so don't try to do anything fancy.
+  double min_playback_rate = GetMinPlaybackRate();
+  if (!DidSetLooping() && Buffer() && IsPlayingOrScheduled() &&
+      min_playback_rate > 0) {
+    // Adjust the duration to include the playback rate. Only need to account
+    // for rate < 1 which makes the sound last longer. For rate >= 1, the
+    // source stops sooner, but that's ok.
+    double actual_duration = Buffer()->duration() / min_playback_rate;
+
+    double stop_time = start_time_ + actual_duration;
+
+    // See crbug.com/478301. If a source node is started via start(), the source
+    // may not start at that time but one quantum (128 frames) later. But we
+    // compute the stop time based on the start time and the duration, so we end
+    // up stopping one quantum early. Thus, add a little extra time; we just
+    // need to stop the source sometime after it should have stopped if it
+    // hadn't already. We don't need to be super precise on when to stop.
+    double extra_stop_time =
+        kExtraStopFrames / static_cast<double>(Context()->sampleRate());
+
+    stop_time += extra_stop_time;
+    if (Context()->currentTime() > stop_time) {
+      // The context time has passed the time when the source nodes should have
+      // stopped playing. Stop the node now and deref it. (But don't run the
+      // onEnded event because the source never actually played.)
+      FinishWithoutOnEnded();
+    }
+  }
+}
+
+// ----------------------------------------------------------------
+// Creates the JS-visible node: constructs the playbackRate (default 1.0) and
+// detune (default 0.0) AudioParams, then installs the audio-thread handler
+// that performs the actual rendering.
+AudioBufferSourceNode::AudioBufferSourceNode(BaseAudioContext& context)
+    : AudioScheduledSourceNode(context),
+      playback_rate_(AudioParam::Create(context,
+                                        kParamTypeAudioBufferSourcePlaybackRate,
+                                        "AudioBufferSource.playbackRate",
+                                        1.0)),
+      detune_(AudioParam::Create(context,
+                                 kParamTypeAudioBufferSourceDetune,
+                                 "AudioBufferSource.detune",
+                                 0.0)) {
+  SetHandler(AudioBufferSourceHandler::Create(*this, context.sampleRate(),
+                                              playback_rate_->Handler(),
+                                              detune_->Handler()));
+}
+
+// Factory for `new AudioBufferSourceNode(context)`; returns nullptr (with an
+// exception pending) when the context is already closed.
+AudioBufferSourceNode* AudioBufferSourceNode::Create(
+    BaseAudioContext& context,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (context.IsContextClosed()) {
+    context.ThrowExceptionForClosedState(exception_state);
+    return nullptr;
+  }
+
+  return new AudioBufferSourceNode(context);
+}
+
+// Factory for the dictionary constructor: creates the node, then applies
+// every AudioBufferSourceOptions member onto it.
+AudioBufferSourceNode* AudioBufferSourceNode::Create(
+    BaseAudioContext* context,
+    AudioBufferSourceOptions& options,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  AudioBufferSourceNode* node = Create(*context, exception_state);
+
+  if (!node)
+    return nullptr;
+
+  if (options.hasBuffer())
+    node->setBuffer(options.buffer(), exception_state);
+  node->detune()->setValue(options.detune());
+  node->setLoop(options.loop());
+  node->setLoopEnd(options.loopEnd());
+  node->setLoopStart(options.loopStart());
+  node->playbackRate()->setValue(options.playbackRate());
+
+  return node;
+}
+
+// Oilpan tracing for the garbage-collected AudioParam members.
+void AudioBufferSourceNode::Trace(blink::Visitor* visitor) {
+  visitor->Trace(playback_rate_);
+  visitor->Trace(detune_);
+  AudioScheduledSourceNode::Trace(visitor);
+}
+
+// The handler installed by the constructor is always an
+// AudioBufferSourceHandler, so the static downcast is safe.
+AudioBufferSourceHandler& AudioBufferSourceNode::GetAudioBufferSourceHandler()
+    const {
+  return static_cast<AudioBufferSourceHandler&>(Handler());
+}
+
+// IDL attribute/method implementations below are thin forwards to the handler
+// (which owns all playback state) or to the AudioParam members.
+
+AudioBuffer* AudioBufferSourceNode::buffer() const {
+  return GetAudioBufferSourceHandler().Buffer();
+}
+
+void AudioBufferSourceNode::setBuffer(AudioBuffer* new_buffer,
+                                      ExceptionState& exception_state) {
+  GetAudioBufferSourceHandler().SetBuffer(new_buffer, exception_state);
+}
+
+AudioParam* AudioBufferSourceNode::playbackRate() const {
+  return playback_rate_;
+}
+
+AudioParam* AudioBufferSourceNode::detune() const {
+  return detune_;
+}
+
+bool AudioBufferSourceNode::loop() const {
+  return GetAudioBufferSourceHandler().Loop();
+}
+
+void AudioBufferSourceNode::setLoop(bool loop) {
+  GetAudioBufferSourceHandler().SetLoop(loop);
+}
+
+double AudioBufferSourceNode::loopStart() const {
+  return GetAudioBufferSourceHandler().LoopStart();
+}
+
+void AudioBufferSourceNode::setLoopStart(double loop_start) {
+  GetAudioBufferSourceHandler().SetLoopStart(loop_start);
+}
+
+double AudioBufferSourceNode::loopEnd() const {
+  return GetAudioBufferSourceHandler().LoopEnd();
+}
+
+void AudioBufferSourceNode::setLoopEnd(double loop_end) {
+  GetAudioBufferSourceHandler().SetLoopEnd(loop_end);
+}
+
+// start() overloads matching the IDL's optional arguments.
+void AudioBufferSourceNode::start(ExceptionState& exception_state) {
+  GetAudioBufferSourceHandler().Start(0, exception_state);
+}
+
+void AudioBufferSourceNode::start(double when,
+                                  ExceptionState& exception_state) {
+  GetAudioBufferSourceHandler().Start(when, exception_state);
+}
+
+void AudioBufferSourceNode::start(double when,
+                                  double grain_offset,
+                                  ExceptionState& exception_state) {
+  GetAudioBufferSourceHandler().Start(when, grain_offset, exception_state);
+}
+
+void AudioBufferSourceNode::start(double when,
+                                  double grain_offset,
+                                  double grain_duration,
+                                  ExceptionState& exception_state) {
+  GetAudioBufferSourceHandler().Start(when, grain_offset, grain_duration,
+                                      exception_state);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h
new file mode 100644
index 00000000000..dc9d4076d5a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_SOURCE_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_SOURCE_NODE_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/panner_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class AudioBufferSourceOptions;
+class BaseAudioContext;
+
+// AudioBufferSourceNode is an AudioNode representing an audio source from an
+// in-memory audio asset represented by an AudioBuffer. It generally will be
+// used for short sounds which require a high degree of scheduling flexibility
+// (can playback in rhythmically perfect ways).
+
+class AudioBufferSourceHandler final : public AudioScheduledSourceHandler {
+ public:
+  static scoped_refptr<AudioBufferSourceHandler> Create(
+      AudioNode&,
+      float sample_rate,
+      AudioParamHandler& playback_rate,
+      AudioParamHandler& detune);
+  ~AudioBufferSourceHandler() override;
+
+  // AudioHandler
+  void Process(size_t frames_to_process) override;
+
+  // setBuffer() is called on the main thread. This is the buffer we use for
+  // playback.
+  void SetBuffer(AudioBuffer*, ExceptionState&);
+  AudioBuffer* Buffer() { return buffer_.Get(); }
+
+  // numberOfChannels() returns the number of output channels. This value
+  // equals the number of channels from the buffer. If a new buffer is set with
+  // a different number of channels, then this value will dynamically change.
+  unsigned NumberOfChannels();
+
+  // Play-state
+  void Start(double when, ExceptionState&);
+  void Start(double when, double grain_offset, ExceptionState&);
+  void Start(double when,
+             double grain_offset,
+             double grain_duration,
+             ExceptionState&);
+
+  // Note: the attribute was originally exposed as |.looping|, but to be more
+  // consistent in naming with <audio> and with how it's described in the
+  // specification, the proper attribute name is |.loop|. The old attribute is
+  // kept for backwards compatibility.
+  bool Loop() const { return is_looping_; }
+  void SetLoop(bool looping) {
+    is_looping_ = looping;
+    SetDidSetLooping(looping);
+  }
+
+  // Loop times in seconds.
+  double LoopStart() const { return loop_start_; }
+  double LoopEnd() const { return loop_end_; }
+  void SetLoopStart(double loop_start) { loop_start_ = loop_start; }
+  void SetLoopEnd(double loop_end) { loop_end_ = loop_end; }
+
+  // If we are no longer playing, propagate silence ahead to downstream nodes.
+  bool PropagatesSilence() const override;
+
+  // Stops (without the onended event) a started, non-looping source whose
+  // playback window has provably already elapsed, so it can be collected.
+  void HandleStoppableSourceNode();
+
+ private:
+  AudioBufferSourceHandler(AudioNode&,
+                           float sample_rate,
+                           AudioParamHandler& playback_rate,
+                           AudioParamHandler& detune);
+  // Common validation/scheduling behind the Start() overloads.
+  void StartSource(double when,
+                   double grain_offset,
+                   double grain_duration,
+                   bool is_duration_given,
+                   ExceptionState&);
+
+  // Returns true on success.
+  bool RenderFromBuffer(AudioBus*,
+                        unsigned destination_frame_offset,
+                        size_t number_of_frames);
+
+  // Render silence starting from "index" frame in AudioBus.
+  inline bool RenderSilenceAndFinishIfNotLooping(AudioBus*,
+                                                 unsigned index,
+                                                 size_t frames_to_process);
+
+  // Clamps grain parameters to the duration of the given AudioBuffer.
+  void ClampGrainParameters(const AudioBuffer*);
+
+  // |buffer_| holds the sample data which this node outputs.
+  // This Persistent doesn't make a reference cycle including
+  // AudioBufferSourceNode.
+  // It is cross-thread, as it will be accessed by the audio and main threads.
+  CrossThreadPersistent<AudioBuffer> buffer_;
+
+  // Pointers for the buffer and destination.
+  std::unique_ptr<const float* []> source_channels_;
+  std::unique_ptr<float* []> destination_channels_;
+
+  scoped_refptr<AudioParamHandler> playback_rate_;
+  scoped_refptr<AudioParamHandler> detune_;
+
+  // Acquire/release accessors for |did_set_looping_|, which is written via
+  // SetLoop() and read via DidSetLooping() from different threads.
+  bool DidSetLooping() const { return AcquireLoad(&did_set_looping_); }
+  void SetDidSetLooping(bool loop) {
+    // Sticky: once looping has ever been requested, this stays true.
+    bool new_looping = DidSetLooping() || loop;
+    ReleaseStore(&did_set_looping_, new_looping);
+  }
+
+  // If |is_looping_| is false, then this node will be done playing and become
+  // inactive after it reaches the end of the sample data in the buffer. If
+  // true, it will wrap around to the start of the buffer each time it reaches
+  // the end.
+  bool is_looping_;
+
+  // True if the source .loop attribute was ever set.
+  int did_set_looping_;
+
+  double loop_start_;
+  double loop_end_;
+
+  // |virtual_read_index_| is a sample-frame index into our buffer representing
+  // the current playback position. Since it's floating-point, it has
+  // sub-sample accuracy.
+  double virtual_read_index_;
+
+  // Granular playback
+  bool is_grain_;
+  double grain_offset_; // in seconds
+  double grain_duration_; // in seconds
+  // True if grainDuration is given explicitly (via 3 arg start method).
+  bool is_duration_given_;
+
+  // Compute playback rate (k-rate) by incorporating the sample rate
+  // conversion factor, and the value of playbackRate and detune AudioParams.
+  double ComputePlaybackRate();
+
+  double GetMinPlaybackRate();
+
+  // The minimum playbackRate value ever used for this source.
+  double min_playback_rate_;
+
+  // |min_playback_rate_| may be updated by the audio thread
+  // while the main thread checks if the node is in a stoppable
+  // state, hence access needs to be atomic.
+  //
+  // TODO: when the codebase adopts std::atomic<>, use it for
+  // |min_playback_rate_|.
+  Mutex min_playback_rate_mutex_;
+
+  // True if the |buffer| attribute has ever been set to a non-null
+  // value. Defaults to false.
+  bool buffer_has_been_set_;
+};
+
+// JS-visible wrapper; all playback state lives in AudioBufferSourceHandler.
+class AudioBufferSourceNode final : public AudioScheduledSourceNode {
+  DEFINE_WRAPPERTYPEINFO();
+
+ public:
+  static AudioBufferSourceNode* Create(BaseAudioContext&, ExceptionState&);
+  static AudioBufferSourceNode* Create(BaseAudioContext*,
+                                       AudioBufferSourceOptions&,
+                                       ExceptionState&);
+  virtual void Trace(blink::Visitor*);
+  AudioBufferSourceHandler& GetAudioBufferSourceHandler() const;
+
+  // IDL attribute plumbing; each forwards to the handler or to the AudioParam
+  // members below.
+  AudioBuffer* buffer() const;
+  void setBuffer(AudioBuffer*, ExceptionState&);
+  AudioParam* playbackRate() const;
+  AudioParam* detune() const;
+  bool loop() const;
+  void setLoop(bool);
+  double loopStart() const;
+  void setLoopStart(double);
+  double loopEnd() const;
+  void setLoopEnd(double);
+
+  // IDL start() overloads (optional when/offset/duration arguments).
+  void start(ExceptionState&);
+  void start(double when, ExceptionState&);
+  void start(double when, double grain_offset, ExceptionState&);
+  void start(double when,
+             double grain_offset,
+             double grain_duration,
+             ExceptionState&);
+
+ private:
+  AudioBufferSourceNode(BaseAudioContext&);
+
+  Member<AudioParam> playback_rate_;
+  Member<AudioParam> detune_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_BUFFER_SOURCE_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.idl
new file mode 100644
index 00000000000..ed3550e25b8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.idl
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// A cached (non-streamed), memory-resident audio source
+// See https://webaudio.github.io/web-audio-api/#AudioBufferSourceNode
+[
+ Constructor(BaseAudioContext context, optional AudioBufferSourceOptions options),
+ RaisesException=Constructor,
+ ActiveScriptWrappable,
+ Measure
+]
+interface AudioBufferSourceNode : AudioScheduledSourceNode {
+ [RaisesException=Setter] attribute AudioBuffer? buffer;
+
+ readonly attribute AudioParam playbackRate;
+ readonly attribute AudioParam detune;
+
+ attribute boolean loop;
+ attribute double loopStart;
+ attribute double loopEnd;
+
+ [RaisesException] void start(optional double when = 0, optional double grainOffset, optional double grainDuration);
+
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_options.idl
new file mode 100644
index 00000000000..d95bce3f933
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_buffer_source_options.idl
@@ -0,0 +1,13 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-audiobuffersourceoptions
+dictionary AudioBufferSourceOptions {
+ AudioBuffer? buffer;
+ float detune = 0;
+ boolean loop = false;
+ double loopEnd = 0;
+ double loopStart = 0;
+ float playbackRate = 1;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc
new file mode 100644
index 00000000000..f1e12349f27
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.cc
@@ -0,0 +1,267 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_context.h"
+
+#include "third_party/blink/public/platform/web_audio_latency_hint.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
+#include "third_party/blink/renderer/core/dom/dom_exception.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/frame/local_dom_window.h"
+#include "third_party/blink/renderer/core/frame/use_counter.h"
+#include "third_party/blink/renderer/core/timing/dom_window_performance.h"
+#include "third_party/blink/renderer/core/timing/window_performance.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_context_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_timestamp.h"
+#include "third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+
+#if DEBUG_AUDIONODE_REFERENCES
+#include <stdio.h>
+#endif
+
+namespace blink {
+
+// Number of AudioContexts still alive. It's incremented when an
+// AudioContext is created and decremented when the context is closed.
+static unsigned g_hardware_context_count = 0;
+
+// A context ID that is incremented for each context that is created.
+// This initializes the internal id for the context.
+static unsigned g_context_id = 0;
+
+AudioContext* AudioContext::Create(Document& document,
+ const AudioContextOptions& context_options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ UseCounter::CountCrossOriginIframe(
+ document, WebFeature::kAudioContextCrossOriginIframe);
+
+ WebAudioLatencyHint latency_hint(WebAudioLatencyHint::kCategoryInteractive);
+ if (context_options.latencyHint().IsAudioContextLatencyCategory()) {
+ latency_hint = WebAudioLatencyHint(
+ context_options.latencyHint().GetAsAudioContextLatencyCategory());
+ } else if (context_options.latencyHint().IsDouble()) {
+ // This should be the requested output latency in seconds, without taking
+ // into account double buffering (same as baseLatency).
+ latency_hint =
+ WebAudioLatencyHint(context_options.latencyHint().GetAsDouble());
+ }
+
+ AudioContext* audio_context = new AudioContext(document, latency_hint);
+ audio_context->PauseIfNeeded();
+
+ if (!AudioUtilities::IsValidAudioBufferSampleRate(
+ audio_context->sampleRate())) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexOutsideRange(
+ "hardware sample rate", audio_context->sampleRate(),
+ AudioUtilities::MinAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound,
+ AudioUtilities::MaxAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound));
+ return audio_context;
+ }
+ // This starts the audio thread. The destination node's
+ // provideInput() method will now be called repeatedly to render
+ // audio. Each time provideInput() is called, a portion of the
+ // audio stream is rendered. Let's call this time period a "render
+ // quantum". NOTE: for now AudioContext does not need an explicit
+ // startRendering() call from JavaScript. We may want to consider
+ // requiring it for symmetry with OfflineAudioContext.
+ audio_context->MaybeUnlockUserGesture();
+ if (audio_context->IsAllowedToStart()) {
+ audio_context->StartRendering();
+ audio_context->SetContextState(kRunning);
+ }
+ ++g_hardware_context_count;
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: AudioContext::AudioContext(): %u #%u\n",
+ audio_context, audio_context->context_id_, g_hardware_context_count);
+#endif
+
+ DEFINE_STATIC_LOCAL(SparseHistogram, max_channel_count_histogram,
+ ("WebAudio.AudioContext.MaxChannelsAvailable"));
+ DEFINE_STATIC_LOCAL(SparseHistogram, sample_rate_histogram,
+ ("WebAudio.AudioContext.HardwareSampleRate"));
+ max_channel_count_histogram.Sample(
+ audio_context->destination()->maxChannelCount());
+ sample_rate_histogram.Sample(audio_context->sampleRate());
+
+ return audio_context;
+}
+
+AudioContext::AudioContext(Document& document,
+ const WebAudioLatencyHint& latency_hint)
+ : BaseAudioContext(&document, kRealtimeContext),
+ context_id_(g_context_id++) {
+ destination_node_ = DefaultAudioDestinationNode::Create(this, latency_hint);
+ Initialize();
+}
+
+AudioContext::~AudioContext() {
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: AudioContext::~AudioContext(): %u\n", this,
+ context_id_);
+#endif
+}
+
+void AudioContext::Trace(blink::Visitor* visitor) {
+ visitor->Trace(close_resolver_);
+ BaseAudioContext::Trace(visitor);
+}
+
+ScriptPromise AudioContext::suspendContext(ScriptState* script_state) {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(this);
+
+ ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = resolver->Promise();
+
+ if (ContextState() == kClosed) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError, "Cannot suspend a context that has been closed"));
+ } else {
+ // Stop rendering now.
+ if (destination())
+ StopRendering();
+
+ // Since we don't have any way of knowing when the hardware actually stops,
+ // we'll just resolve the promise now.
+ resolver->Resolve();
+ }
+
+ return promise;
+}
+
+ScriptPromise AudioContext::resumeContext(ScriptState* script_state) {
+ DCHECK(IsMainThread());
+
+ if (IsContextClosed()) {
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(kInvalidAccessError,
+ "cannot resume a closed AudioContext"));
+ }
+
+ ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = resolver->Promise();
+
+ // If we're already running, just resolve; nothing else needs to be
+ // done.
+ if (ContextState() == kRunning) {
+ resolver->Resolve();
+ return promise;
+ }
+ // Restart the destination node to pull on the audio graph.
+ if (destination()) {
+ MaybeUnlockUserGesture();
+ if (IsAllowedToStart()) {
+ // Do not set the state to running here. We wait for the
+ // destination to start to set the state.
+ StartRendering();
+ }
+ }
+
+ // Save the resolver which will get resolved when the destination node starts
+ // pulling on the graph again.
+ {
+ GraphAutoLocker locker(this);
+ resume_resolvers_.push_back(resolver);
+ }
+
+ return promise;
+}
+
+void AudioContext::getOutputTimestamp(ScriptState* script_state,
+ AudioTimestamp& result) {
+ DCHECK(IsMainThread());
+ LocalDOMWindow* window = LocalDOMWindow::From(script_state);
+ if (!window)
+ return;
+
+ if (!destination()) {
+ result.setContextTime(0.0);
+ result.setPerformanceTime(0.0);
+ return;
+ }
+
+ WindowPerformance* performance = DOMWindowPerformance::performance(*window);
+ DCHECK(performance);
+
+ AudioIOPosition position = OutputPosition();
+
+ double performance_time = performance->MonotonicTimeToDOMHighResTimeStamp(
+ TimeTicksFromSeconds(position.timestamp));
+ if (performance_time < 0.0)
+ performance_time = 0.0;
+
+ result.setContextTime(position.position);
+ result.setPerformanceTime(performance_time);
+}
+
+ScriptPromise AudioContext::closeContext(ScriptState* script_state) {
+ if (IsContextClosed()) {
+ // We've already closed the context previously, but it hasn't yet been
+ // resolved, so just create a new promise and reject it.
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(kInvalidStateError,
+ "Cannot close a context that is being closed or "
+ "has already been closed."));
+ }
+
+ // Save the current sample rate for any subsequent decodeAudioData calls.
+ SetClosedContextSampleRate(sampleRate());
+
+ close_resolver_ = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = close_resolver_->Promise();
+
+ // Stop the audio context. This will stop the destination node from pulling
+ // audio anymore. And since we have disconnected the destination from the
+ // audio graph, and thus has no references, the destination node can be GCed if
+ // JS has no references. uninitialize() will also resolve the Promise created
+ // here.
+ Uninitialize();
+
+ return promise;
+}
+
+void AudioContext::DidClose() {
+ // This is specific to AudioContexts. OfflineAudioContexts
+ // are closed in their completion event.
+ SetContextState(kClosed);
+
+ DCHECK(g_hardware_context_count);
+ --g_hardware_context_count;
+
+ if (close_resolver_)
+ close_resolver_->Resolve();
+}
+
+bool AudioContext::IsContextClosed() const {
+ return close_resolver_ || BaseAudioContext::IsContextClosed();
+}
+
+void AudioContext::StopRendering() {
+ DCHECK(IsMainThread());
+ DCHECK(destination());
+
+ if (ContextState() == kRunning) {
+ destination()->GetAudioDestinationHandler().StopRendering();
+ SetContextState(kSuspended);
+ GetDeferredTaskHandler().ClearHandlersToBeDeleted();
+ }
+}
+
+double AudioContext::baseLatency() const {
+ return FramesPerBuffer() / static_cast<double>(sampleRate());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h
new file mode 100644
index 00000000000..24667a839dc
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.h
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_CONTEXT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_CONTEXT_H_
+
+#include "third_party/blink/renderer/bindings/core/v8/script_promise.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_context_options.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+
+namespace blink {
+
+class AudioContextOptions;
+class AudioTimestamp;
+class Document;
+class ExceptionState;
+class ScriptState;
+class WebAudioLatencyHint;
+
+// This is a BaseAudioContext which actually plays sound, unlike an
+// OfflineAudioContext which renders sound into a buffer.
+class MODULES_EXPORT AudioContext : public BaseAudioContext {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioContext* Create(Document&,
+ const AudioContextOptions&,
+ ExceptionState&);
+
+ ~AudioContext() override;
+ virtual void Trace(blink::Visitor*);
+
+ ScriptPromise closeContext(ScriptState*);
+ bool IsContextClosed() const final;
+
+ ScriptPromise suspendContext(ScriptState*) final;
+ ScriptPromise resumeContext(ScriptState*) final;
+
+ bool HasRealtimeConstraint() final { return true; }
+
+ void getOutputTimestamp(ScriptState*, AudioTimestamp&);
+ double baseLatency() const;
+
+ protected:
+ AudioContext(Document&, const WebAudioLatencyHint&);
+
+ void DidClose() final;
+
+ private:
+ void StopRendering();
+
+ unsigned context_id_;
+ Member<ScriptPromiseResolver> close_resolver_;
+
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_CONTEXT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.idl
new file mode 100644
index 00000000000..2983635f315
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context.idl
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+enum AudioContextLatencyCategory {
+ "balanced",
+ "interactive",
+ "playback"
+};
+
+// See https://webaudio.github.io/web-audio-api/#AudioContext
+[
+ ActiveScriptWrappable,
+ Constructor(optional AudioContextOptions contextOptions),
+ ConstructorCallWith=Document,
+ RaisesException=Constructor,
+ Measure
+] interface AudioContext : BaseAudioContext {
+ [MeasureAs=AudioContextSuspend, CallWith=ScriptState, ImplementedAs=suspendContext] Promise<void> suspend();
+ [MeasureAs=AudioContextClose, CallWith=ScriptState, ImplementedAs=closeContext] Promise<void> close();
+
+ // Output timestamp
+ [MeasureAs=AudioContextGetOutputTimestamp, CallWith=ScriptState] AudioTimestamp getOutputTimestamp();
+
+ // Number of seconds of processing latency incurred by the AudioContext
+ // passing the audio from the AudioDestinationNode to the audio subsystem
+ readonly attribute double baseLatency;
+
+ // Sources
+ // TODO(rtoy): The following methods should be here instead of in BaseAudioContext:
+ //
+ // createMediaElementSource(HTMLMediaElement mediaElement)
+ // createMediaStreamSource(MediaStream mediaStream)
+ // createMediaStreamDestination()
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_options.idl
new file mode 100644
index 00000000000..bfab28c1b86
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_options.idl
@@ -0,0 +1,11 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#AudioContextOptions
+dictionary AudioContextOptions {
+ // If passed as a double this should be the requested output latency in
+ // seconds, without taking into account double buffering (same as
+ // AudioContext.baseLatency).
+ (AudioContextLatencyCategory or double) latencyHint = "interactive";
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc
new file mode 100644
index 00000000000..69d0f8722b9
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_context_test.cc
@@ -0,0 +1,156 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_context.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/public/platform/web_audio_device.h"
+#include "third_party/blink/public/platform/web_audio_latency_hint.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/testing/page_test_base.h"
+#include "third_party/blink/renderer/platform/testing/testing_platform_support.h"
+
+namespace blink {
+
+namespace {
+
+class MockWebAudioDeviceForAudioContext : public WebAudioDevice {
+ public:
+ explicit MockWebAudioDeviceForAudioContext(double sample_rate,
+ int frames_per_buffer)
+ : sample_rate_(sample_rate), frames_per_buffer_(frames_per_buffer) {}
+ ~MockWebAudioDeviceForAudioContext() override = default;
+
+ void Start() override {}
+ void Stop() override {}
+ double SampleRate() override { return sample_rate_; }
+ int FramesPerBuffer() override { return frames_per_buffer_; }
+
+ private:
+ double sample_rate_;
+ int frames_per_buffer_;
+};
+
+class AudioContextTestPlatform : public TestingPlatformSupport {
+ public:
+ std::unique_ptr<WebAudioDevice> CreateAudioDevice(
+ unsigned number_of_input_channels,
+ unsigned number_of_channels,
+ const WebAudioLatencyHint& latency_hint,
+ WebAudioDevice::RenderCallback*,
+ const WebString& device_id,
+ const WebSecurityOrigin&) override {
+ double buffer_size = 0;
+ const double interactive_size = AudioHardwareBufferSize();
+ const double balanced_size = AudioHardwareBufferSize() * 2;
+ const double playback_size = AudioHardwareBufferSize() * 4;
+ switch (latency_hint.Category()) {
+ case WebAudioLatencyHint::kCategoryInteractive:
+ buffer_size = interactive_size;
+ break;
+ case WebAudioLatencyHint::kCategoryBalanced:
+ buffer_size = balanced_size;
+ break;
+ case WebAudioLatencyHint::kCategoryPlayback:
+ buffer_size = playback_size;
+ break;
+ case WebAudioLatencyHint::kCategoryExact:
+ buffer_size =
+ clampTo(latency_hint.Seconds() * AudioHardwareSampleRate(),
+ static_cast<double>(AudioHardwareBufferSize()),
+ static_cast<double>(playback_size));
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ return std::make_unique<MockWebAudioDeviceForAudioContext>(
+ AudioHardwareSampleRate(), buffer_size);
+ }
+
+ std::unique_ptr<WebThread> CreateThread(
+ const WebThreadCreationParams& params) override {
+ return old_platform_->CreateThread(params);
+ }
+
+ double AudioHardwareSampleRate() override { return 44100; }
+ size_t AudioHardwareBufferSize() override { return 128; }
+};
+
+} // anonymous namespace
+
+class AudioContextTest : public PageTestBase {
+ protected:
+ AudioContextTest() :
+ platform_(new ScopedTestingPlatformSupport<AudioContextTestPlatform>) {}
+
+ ~AudioContextTest() {
+ platform_.reset();
+ }
+
+ void SetUp() override { PageTestBase::SetUp(IntSize()); }
+
+ private:
+ std::unique_ptr<ScopedTestingPlatformSupport<AudioContextTestPlatform>>
+ platform_;
+};
+
+TEST_F(AudioContextTest, AudioContextOptions_WebAudioLatencyHint) {
+ AudioContextOptions interactive_options;
+ interactive_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
+ "interactive"));
+ AudioContext* interactive_context = AudioContext::Create(
+ GetDocument(), interactive_options, ASSERT_NO_EXCEPTION);
+
+ AudioContextOptions balanced_options;
+ balanced_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
+ "balanced"));
+ AudioContext* balanced_context = AudioContext::Create(
+ GetDocument(), balanced_options, ASSERT_NO_EXCEPTION);
+ EXPECT_GT(balanced_context->baseLatency(),
+ interactive_context->baseLatency());
+
+ AudioContextOptions playback_options;
+ playback_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromAudioContextLatencyCategory(
+ "playback"));
+ AudioContext* playback_context = AudioContext::Create(
+ GetDocument(), playback_options, ASSERT_NO_EXCEPTION);
+ EXPECT_GT(playback_context->baseLatency(), balanced_context->baseLatency());
+
+ AudioContextOptions exact_too_small_options;
+ exact_too_small_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromDouble(
+ interactive_context->baseLatency() / 2));
+ AudioContext* exact_too_small_context = AudioContext::Create(
+ GetDocument(), exact_too_small_options, ASSERT_NO_EXCEPTION);
+ EXPECT_EQ(exact_too_small_context->baseLatency(),
+ interactive_context->baseLatency());
+
+ const double exact_latency_sec =
+ (interactive_context->baseLatency() + playback_context->baseLatency()) /
+ 2;
+ AudioContextOptions exact_ok_options;
+ exact_ok_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromDouble(exact_latency_sec));
+ AudioContext* exact_ok_context = AudioContext::Create(
+ GetDocument(), exact_ok_options, ASSERT_NO_EXCEPTION);
+ EXPECT_EQ(exact_ok_context->baseLatency(), exact_latency_sec);
+
+ AudioContextOptions exact_too_big_options;
+ exact_too_big_options.setLatencyHint(
+ AudioContextLatencyCategoryOrDouble::FromDouble(
+ playback_context->baseLatency() * 2));
+ AudioContext* exact_too_big_context = AudioContext::Create(
+ GetDocument(), exact_too_big_options, ASSERT_NO_EXCEPTION);
+ EXPECT_EQ(exact_too_big_context->baseLatency(),
+ playback_context->baseLatency());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc
new file mode 100644
index 00000000000..d3d89db4acf
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/denormal_disabler.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/wtf/atomics.h"
+
+namespace blink {
+
+AudioDestinationHandler::AudioDestinationHandler(AudioNode& node)
+ : AudioHandler(kNodeTypeDestination, node, 0), current_sample_frame_(0) {
+ AddInput();
+}
+
+AudioDestinationHandler::~AudioDestinationHandler() {
+ DCHECK(!IsInitialized());
+}
+
+void AudioDestinationHandler::Render(AudioBus* source_bus,
+ AudioBus* destination_bus,
+ size_t number_of_frames,
+ const AudioIOPosition& output_position) {
+ TRACE_EVENT0("webaudio", "AudioDestinationHandler::Render");
+
+ // We don't want denormals slowing down any of the audio processing
+ // since they can very seriously hurt performance. This will take care of all
+ // AudioNodes because they all process within this scope.
+ DenormalDisabler denormal_disabler;
+
+ // Need to check if the context is actually alive. Otherwise the subsequent
+ // steps will fail. If the context is not alive somehow, return immediately
+ // and do nothing.
+ //
+ // TODO(hongchan): the context can go away while rendering, so this check
+ // cannot guarantee the safe execution of the following steps.
+ DCHECK(Context());
+ if (!Context())
+ return;
+
+ Context()->GetDeferredTaskHandler().SetAudioThreadToCurrentThread();
+
+ // If the destination node is not initialized, pass the silence to the final
+ // audio destination (one step before the FIFO). This check is for the case
+ // where the destination is in the middle of tearing down process.
+ if (!IsInitialized()) {
+ destination_bus->Zero();
+ return;
+ }
+
+ // Let the context take care of any business at the start of each render
+ // quantum.
+ Context()->HandlePreRenderTasks(output_position);
+
+ // Prepare the local audio input provider for this render quantum.
+ if (source_bus)
+ local_audio_input_provider_.Set(source_bus);
+
+ DCHECK_GE(NumberOfInputs(), 1u);
+ if (NumberOfInputs() < 1) {
+ destination_bus->Zero();
+ return;
+ }
+ // This will cause the node(s) connected to us to process, which in turn will
+ // pull on their input(s), all the way backwards through the rendering graph.
+ AudioBus* rendered_bus = Input(0).Pull(destination_bus, number_of_frames);
+
+ if (!rendered_bus) {
+ destination_bus->Zero();
+ } else if (rendered_bus != destination_bus) {
+ // in-place processing was not possible - so copy
+ destination_bus->CopyFrom(*rendered_bus);
+ }
+
+ // Process nodes which need a little extra help because they are not connected
+ // to anything, but still need to process.
+ Context()->GetDeferredTaskHandler().ProcessAutomaticPullNodes(
+ number_of_frames);
+
+ // Let the context take care of any business at the end of each render
+ // quantum.
+ Context()->HandlePostRenderTasks();
+
+ // Advance current sample-frame.
+ size_t new_sample_frame = current_sample_frame_ + number_of_frames;
+ ReleaseStore(&current_sample_frame_, new_sample_frame);
+
+ Context()->UpdateWorkletGlobalScopeOnRenderingThread();
+}
+
+// ----------------------------------------------------------------
+
+AudioDestinationNode::AudioDestinationNode(BaseAudioContext& context)
+ : AudioNode(context) {}
+
+AudioDestinationHandler& AudioDestinationNode::GetAudioDestinationHandler()
+ const {
+ return static_cast<AudioDestinationHandler&>(Handler());
+}
+
+unsigned long AudioDestinationNode::maxChannelCount() const {
+ return GetAudioDestinationHandler().MaxChannelCount();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h
new file mode 100644
index 00000000000..16de5099941
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_DESTINATION_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_DESTINATION_NODE_H_
+
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_io_callback.h"
+#include "third_party/blink/renderer/platform/audio/audio_source_provider.h"
+
+namespace blink {
+
+class AudioBus;
+class BaseAudioContext;
+
+class AudioDestinationHandler : public AudioHandler, public AudioIOCallback {
+ public:
+ AudioDestinationHandler(AudioNode&);
+ ~AudioDestinationHandler() override;
+
+ // AudioHandler
+ void Process(size_t) final {
+ } // we're pulled by hardware so this is never called
+
+ // The audio hardware calls render() to get the next render quantum of audio
+ // into destinationBus. It will optionally give us local/live audio input in
+ // sourceBus (if it's not 0).
+ void Render(AudioBus* source_bus,
+ AudioBus* destination_bus,
+ size_t number_of_frames,
+ const AudioIOPosition& output_position) final;
+
+ size_t CurrentSampleFrame() const {
+ return AcquireLoad(&current_sample_frame_);
+ }
+ double CurrentTime() const {
+ return CurrentSampleFrame() / static_cast<double>(SampleRate());
+ }
+
+ virtual unsigned long MaxChannelCount() const { return 0; }
+
+ virtual void StartRendering() = 0;
+ virtual void StopRendering() = 0;
+
+ // The render thread needs to be changed after Worklet JS code is loaded by
+ // AudioWorklet. This method ensures the switching of render thread and the
+ // restart of the context.
+ virtual void RestartRendering() = 0;
+
+ // Returns the rendering callback buffer size.
+ virtual size_t CallbackBufferSize() const = 0;
+ virtual double SampleRate() const = 0;
+
+ // Returns the audio buffer size in frames used by the AudioContext.
+ virtual int FramesPerBuffer() const = 0;
+
+ protected:
+ // LocalAudioInputProvider allows us to expose an AudioSourceProvider for
+ // local/live audio input. If there is local/live audio input, we call set()
+ // with the audio input data every render quantum.
+ class LocalAudioInputProvider final : public AudioSourceProvider {
+ public:
+ LocalAudioInputProvider()
+ : source_bus_(AudioBus::Create(
+ 2,
+ AudioUtilities::kRenderQuantumFrames)) // FIXME: handle
+ // non-stereo local input.
+ {}
+
+ void Set(AudioBus* bus) {
+ if (bus)
+ source_bus_->CopyFrom(*bus);
+ }
+
+ // AudioSourceProvider.
+ void ProvideInput(AudioBus* destination_bus,
+ size_t number_of_frames) override {
+ bool is_good = destination_bus &&
+ destination_bus->length() == number_of_frames &&
+ source_bus_->length() == number_of_frames;
+ DCHECK(is_good);
+ if (is_good)
+ destination_bus->CopyFrom(*source_bus_);
+ }
+
+ private:
+ scoped_refptr<AudioBus> source_bus_;
+ };
+
+ // Counts the number of sample-frames processed by the destination.
+ size_t current_sample_frame_;
+
+ LocalAudioInputProvider local_audio_input_provider_;
+};
+
+class AudioDestinationNode : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ AudioDestinationHandler& GetAudioDestinationHandler() const;
+
+ unsigned long maxChannelCount() const;
+ size_t CallbackBufferSize() const { return Handler().CallbackBufferSize(); }
+
+ protected:
+ AudioDestinationNode(BaseAudioContext&);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_DESTINATION_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.idl
new file mode 100644
index 00000000000..29911739982
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_destination_node.idl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#AudioDestinationNode
+interface AudioDestinationNode : AudioNode {
+ readonly attribute unsigned long maxChannelCount;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc
new file mode 100644
index 00000000000..c928c47a69f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_listener.h"
+#include "third_party/blink/renderer/modules/webaudio/panner_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/hrtf_database_loader.h"
+
+namespace blink {
+
+AudioListener::AudioListener(BaseAudioContext& context)
+ : position_x_(AudioParam::Create(context,
+ kParamTypeAudioListenerPositionX,
+ "AudioListener.positionX",
+ 0.0)),
+ position_y_(AudioParam::Create(context,
+ kParamTypeAudioListenerPositionY,
+ "AudioListener.positionY",
+ 0.0)),
+ position_z_(AudioParam::Create(context,
+ kParamTypeAudioListenerPositionZ,
+ "AudioListener.positionZ",
+ 0.0)),
+ forward_x_(AudioParam::Create(context,
+ kParamTypeAudioListenerForwardX,
+ "AudioListener.forwardX",
+ 0.0)),
+ forward_y_(AudioParam::Create(context,
+ kParamTypeAudioListenerForwardY,
+ "AudioListener.forwardY",
+ 0.0)),
+ forward_z_(AudioParam::Create(context,
+ kParamTypeAudioListenerForwardZ,
+ "AudioListener.forwardZ",
+ -1.0)),
+ up_x_(AudioParam::Create(context,
+ kParamTypeAudioListenerUpX,
+ "AudioListener.upX",
+ 0.0)),
+ up_y_(AudioParam::Create(context,
+ kParamTypeAudioListenerUpY,
+ "AudioListener.upY",
+ 1.0)),
+ up_z_(AudioParam::Create(context,
+ kParamTypeAudioListenerUpZ,
+ "AudioListener.upZ",
+ 0.0)),
+ last_update_time_(-1),
+ is_listener_dirty_(false),
+ position_x_values_(AudioUtilities::kRenderQuantumFrames),
+ position_y_values_(AudioUtilities::kRenderQuantumFrames),
+ position_z_values_(AudioUtilities::kRenderQuantumFrames),
+ forward_x_values_(AudioUtilities::kRenderQuantumFrames),
+ forward_y_values_(AudioUtilities::kRenderQuantumFrames),
+ forward_z_values_(AudioUtilities::kRenderQuantumFrames),
+ up_x_values_(AudioUtilities::kRenderQuantumFrames),
+ up_y_values_(AudioUtilities::kRenderQuantumFrames),
+ up_z_values_(AudioUtilities::kRenderQuantumFrames) {
+  // Initialize the cached values with the current values. Thus, we don't need
+  // to notify any panners because we have not moved.
+ last_position_ = GetPosition();
+ last_forward_ = Orientation();
+ last_up_ = UpVector();
+}
+
+AudioListener::~AudioListener() = default;
+
+void AudioListener::Trace(blink::Visitor* visitor) {
+ visitor->Trace(position_x_);
+ visitor->Trace(position_y_);
+ visitor->Trace(position_z_);
+
+ visitor->Trace(forward_x_);
+ visitor->Trace(forward_y_);
+ visitor->Trace(forward_z_);
+
+ visitor->Trace(up_x_);
+ visitor->Trace(up_y_);
+ visitor->Trace(up_z_);
+
+ ScriptWrappable::Trace(visitor);
+}
+
+void AudioListener::AddPanner(PannerHandler& panner) {
+ DCHECK(IsMainThread());
+ panners_.insert(&panner);
+}
+
+void AudioListener::RemovePanner(PannerHandler& panner) {
+ DCHECK(IsMainThread());
+ DCHECK(panners_.Contains(&panner));
+ panners_.erase(&panner);
+}
+
+bool AudioListener::HasSampleAccurateValues() const {
+ return positionX()->Handler().HasSampleAccurateValues() ||
+ positionY()->Handler().HasSampleAccurateValues() ||
+ positionZ()->Handler().HasSampleAccurateValues() ||
+ forwardX()->Handler().HasSampleAccurateValues() ||
+ forwardY()->Handler().HasSampleAccurateValues() ||
+ forwardZ()->Handler().HasSampleAccurateValues() ||
+ upX()->Handler().HasSampleAccurateValues() ||
+ upY()->Handler().HasSampleAccurateValues() ||
+ upZ()->Handler().HasSampleAccurateValues();
+}
+
+void AudioListener::UpdateValuesIfNeeded(size_t frames_to_process) {
+ double current_time =
+ positionX()->Handler().DestinationHandler().CurrentTime();
+ if (last_update_time_ != current_time) {
+ // Time has changed. Update all of the automation values now.
+ last_update_time_ = current_time;
+
+ bool sizes_are_good = frames_to_process <= position_x_values_.size() &&
+ frames_to_process <= position_y_values_.size() &&
+ frames_to_process <= position_z_values_.size() &&
+ frames_to_process <= forward_x_values_.size() &&
+ frames_to_process <= forward_y_values_.size() &&
+ frames_to_process <= forward_z_values_.size() &&
+ frames_to_process <= up_x_values_.size() &&
+ frames_to_process <= up_y_values_.size() &&
+ frames_to_process <= up_z_values_.size();
+
+ DCHECK(sizes_are_good);
+ if (!sizes_are_good)
+ return;
+
+ positionX()->Handler().CalculateSampleAccurateValues(
+ position_x_values_.Data(), frames_to_process);
+ positionY()->Handler().CalculateSampleAccurateValues(
+ position_y_values_.Data(), frames_to_process);
+ positionZ()->Handler().CalculateSampleAccurateValues(
+ position_z_values_.Data(), frames_to_process);
+
+ forwardX()->Handler().CalculateSampleAccurateValues(
+ forward_x_values_.Data(), frames_to_process);
+ forwardY()->Handler().CalculateSampleAccurateValues(
+ forward_y_values_.Data(), frames_to_process);
+ forwardZ()->Handler().CalculateSampleAccurateValues(
+ forward_z_values_.Data(), frames_to_process);
+
+ upX()->Handler().CalculateSampleAccurateValues(up_x_values_.Data(),
+ frames_to_process);
+ upY()->Handler().CalculateSampleAccurateValues(up_y_values_.Data(),
+ frames_to_process);
+ upZ()->Handler().CalculateSampleAccurateValues(up_z_values_.Data(),
+ frames_to_process);
+ }
+}
+
+const float* AudioListener::GetPositionXValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return position_x_values_.Data();
+}
+
+const float* AudioListener::GetPositionYValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return position_y_values_.Data();
+}
+
+const float* AudioListener::GetPositionZValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return position_z_values_.Data();
+}
+
+const float* AudioListener::GetForwardXValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return forward_x_values_.Data();
+}
+
+const float* AudioListener::GetForwardYValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return forward_y_values_.Data();
+}
+
+const float* AudioListener::GetForwardZValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return forward_z_values_.Data();
+}
+
+const float* AudioListener::GetUpXValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return up_x_values_.Data();
+}
+
+const float* AudioListener::GetUpYValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return up_y_values_.Data();
+}
+
+const float* AudioListener::GetUpZValues(size_t frames_to_process) {
+ UpdateValuesIfNeeded(frames_to_process);
+ return up_z_values_.Data();
+}
+
+void AudioListener::UpdateState() {
+ // This must be called from the audio thread in pre or post render phase of
+ // the graph processing. (AudioListener doesn't have access to the context
+ // to check for the audio thread.)
+ DCHECK(!IsMainThread());
+
+ MutexTryLocker try_locker(listener_lock_);
+ if (try_locker.Locked()) {
+ FloatPoint3D current_position = GetPosition();
+ FloatPoint3D current_forward = Orientation();
+ FloatPoint3D current_up = UpVector();
+
+ is_listener_dirty_ = current_position != last_position_ ||
+ current_forward != last_forward_ ||
+ current_up != last_up_;
+
+ if (is_listener_dirty_) {
+ last_position_ = current_position;
+ last_forward_ = current_forward;
+ last_up_ = current_up;
+ }
+ } else {
+    // The main thread must be updating the position, forward, or up vector;
+    // just assume the listener is dirty. At worst, we'll do a little more
+    // work than necessary for one rendering quantum.
+ is_listener_dirty_ = true;
+ }
+}
+
+void AudioListener::CreateAndLoadHRTFDatabaseLoader(float sample_rate) {
+ DCHECK(IsMainThread());
+
+ if (!hrtf_database_loader_)
+ hrtf_database_loader_ =
+ HRTFDatabaseLoader::CreateAndLoadAsynchronouslyIfNecessary(sample_rate);
+}
+
+bool AudioListener::IsHRTFDatabaseLoaded() {
+ return hrtf_database_loader_ && hrtf_database_loader_->IsLoaded();
+}
+
+void AudioListener::WaitForHRTFDatabaseLoaderThreadCompletion() {
+ if (hrtf_database_loader_)
+ hrtf_database_loader_->WaitForLoaderThreadCompletion();
+}
+
+void AudioListener::MarkPannersAsDirty(unsigned type) {
+ DCHECK(IsMainThread());
+ for (PannerHandler* panner : panners_)
+ panner->MarkPannerAsDirty(type);
+}
+
+void AudioListener::setPosition(const FloatPoint3D& position,
+ ExceptionState& exceptionState) {
+ DCHECK(IsMainThread());
+
+ // This synchronizes with panner's process().
+ MutexLocker listener_locker(listener_lock_);
+
+ double now = position_x_->Context()->currentTime();
+
+ position_x_->setValueAtTime(position.X(), now, exceptionState);
+ position_y_->setValueAtTime(position.Y(), now, exceptionState);
+ position_z_->setValueAtTime(position.Z(), now, exceptionState);
+
+ MarkPannersAsDirty(PannerHandler::kAzimuthElevationDirty |
+ PannerHandler::kDistanceConeGainDirty);
+}
+
+void AudioListener::setOrientation(const FloatPoint3D& orientation,
+ ExceptionState& exceptionState) {
+ DCHECK(IsMainThread());
+
+ // This synchronizes with panner's process().
+ MutexLocker listener_locker(listener_lock_);
+
+ double now = forward_x_->Context()->currentTime();
+
+ forward_x_->setValueAtTime(orientation.X(), now, exceptionState);
+ forward_y_->setValueAtTime(orientation.Y(), now, exceptionState);
+ forward_z_->setValueAtTime(orientation.Z(), now, exceptionState);
+
+ MarkPannersAsDirty(PannerHandler::kAzimuthElevationDirty);
+}
+
+void AudioListener::SetUpVector(const FloatPoint3D& up_vector,
+ ExceptionState& exceptionState) {
+ DCHECK(IsMainThread());
+
+ // This synchronizes with panner's process().
+ MutexLocker listener_locker(listener_lock_);
+
+ double now = up_x_->Context()->currentTime();
+
+ up_x_->setValueAtTime(up_vector.X(), now, exceptionState);
+ up_y_->setValueAtTime(up_vector.Y(), now, exceptionState);
+ up_z_->setValueAtTime(up_vector.Z(), now, exceptionState);
+
+ MarkPannersAsDirty(PannerHandler::kAzimuthElevationDirty);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h
new file mode 100644
index 00000000000..0a29121ff02
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_LISTENER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_LISTENER_H_
+
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/geometry/float_point_3d.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+
+namespace blink {
+
+class HRTFDatabaseLoader;
+class PannerHandler;
+
+// AudioListener maintains the state of the listener in the audio scene as
+// defined in the OpenAL specification.
+
+class AudioListener : public ScriptWrappable {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioListener* Create(BaseAudioContext& context) {
+ return new AudioListener(context);
+ }
+ virtual ~AudioListener();
+
+ // Location of the listener
+ AudioParam* positionX() const { return position_x_; };
+ AudioParam* positionY() const { return position_y_; };
+ AudioParam* positionZ() const { return position_z_; };
+
+ // Forward direction vector of the listener
+ AudioParam* forwardX() const { return forward_x_; };
+ AudioParam* forwardY() const { return forward_y_; };
+ AudioParam* forwardZ() const { return forward_z_; };
+
+ // Up direction vector for the listener
+ AudioParam* upX() const { return up_x_; };
+ AudioParam* upY() const { return up_y_; };
+ AudioParam* upZ() const { return up_z_; };
+
+ // True if any of AudioParams have automations.
+ bool HasSampleAccurateValues() const;
+
+ // Update the internal state of the listener, including updating the dirty
+ // state of all PannerNodes if necessary.
+ void UpdateState();
+
+ bool IsListenerDirty() const { return is_listener_dirty_; }
+
+ const FloatPoint3D GetPosition() const {
+ return FloatPoint3D(position_x_->value(), position_y_->value(),
+ position_z_->value());
+ }
+ const FloatPoint3D Orientation() const {
+ return FloatPoint3D(forward_x_->value(), forward_y_->value(),
+ forward_z_->value());
+ }
+ const FloatPoint3D UpVector() const {
+ return FloatPoint3D(up_x_->value(), up_y_->value(), up_z_->value());
+ }
+
+ const float* GetPositionXValues(size_t frames_to_process);
+ const float* GetPositionYValues(size_t frames_to_process);
+ const float* GetPositionZValues(size_t frames_to_process);
+
+ const float* GetForwardXValues(size_t frames_to_process);
+ const float* GetForwardYValues(size_t frames_to_process);
+ const float* GetForwardZValues(size_t frames_to_process);
+
+ const float* GetUpXValues(size_t frames_to_process);
+ const float* GetUpYValues(size_t frames_to_process);
+ const float* GetUpZValues(size_t frames_to_process);
+
+ // Position
+ void setPosition(float x, float y, float z, ExceptionState& exceptionState) {
+ setPosition(FloatPoint3D(x, y, z), exceptionState);
+ }
+
+ // Orientation and Up-vector
+ void setOrientation(float x,
+ float y,
+ float z,
+ float up_x,
+ float up_y,
+ float up_z,
+ ExceptionState& exceptionState) {
+ setOrientation(FloatPoint3D(x, y, z), exceptionState);
+ SetUpVector(FloatPoint3D(up_x, up_y, up_z), exceptionState);
+ }
+
+ Mutex& ListenerLock() { return listener_lock_; }
+ void AddPanner(PannerHandler&);
+ void RemovePanner(PannerHandler&);
+
+ // HRTF DB loader
+ HRTFDatabaseLoader* HrtfDatabaseLoader() {
+ return hrtf_database_loader_.get();
+ }
+ void CreateAndLoadHRTFDatabaseLoader(float);
+ bool IsHRTFDatabaseLoaded();
+ void WaitForHRTFDatabaseLoaderThreadCompletion();
+
+ void Trace(blink::Visitor*);
+
+ private:
+ AudioListener(BaseAudioContext&);
+
+ void setPosition(const FloatPoint3D&, ExceptionState&);
+ void setOrientation(const FloatPoint3D&, ExceptionState&);
+ void SetUpVector(const FloatPoint3D&, ExceptionState&);
+
+ void MarkPannersAsDirty(unsigned);
+
+ // Location of the listener
+ Member<AudioParam> position_x_;
+ Member<AudioParam> position_y_;
+ Member<AudioParam> position_z_;
+
+ // Forward direction vector of the listener
+ Member<AudioParam> forward_x_;
+ Member<AudioParam> forward_y_;
+ Member<AudioParam> forward_z_;
+
+ // Up direction vector for the listener
+ Member<AudioParam> up_x_;
+ Member<AudioParam> up_y_;
+ Member<AudioParam> up_z_;
+
+ // The position, forward, and up vectors from the last rendering quantum.
+ FloatPoint3D last_position_;
+ FloatPoint3D last_forward_;
+ FloatPoint3D last_up_;
+
+ // Last time that the automations were updated.
+ double last_update_time_;
+
+ // Set every rendering quantum if the listener has moved in any way
+ // (position, forward, or up). This should only be read or written to from
+ // the audio thread.
+ bool is_listener_dirty_;
+
+ void UpdateValuesIfNeeded(size_t frames_to_process);
+
+ AudioFloatArray position_x_values_;
+ AudioFloatArray position_y_values_;
+ AudioFloatArray position_z_values_;
+
+ AudioFloatArray forward_x_values_;
+ AudioFloatArray forward_y_values_;
+ AudioFloatArray forward_z_values_;
+
+ AudioFloatArray up_x_values_;
+ AudioFloatArray up_y_values_;
+ AudioFloatArray up_z_values_;
+
+ // Synchronize a panner's process() with setting of the state of the listener.
+ mutable Mutex listener_lock_;
+ // List for pannerNodes in context. This is updated only in the main thread,
+ // and can be referred in audio thread.
+ // These raw pointers are safe because PannerHandler::uninitialize()
+ // unregisters it from m_panners.
+ HashSet<PannerHandler*> panners_;
+ // HRTF DB loader for panner node.
+ scoped_refptr<HRTFDatabaseLoader> hrtf_database_loader_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_LISTENER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.idl
new file mode 100644
index 00000000000..de2f898198c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_listener.idl
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#audiolistener
+interface AudioListener {
+ [RaisesException, MeasureAs=AudioListenerSetPosition] void setPosition(float x, float y, float z);
+ [RaisesException, MeasureAs=AudioListenerSetOrientation] void setOrientation(float x, float y, float z, float xUp, float yUp, float zUp);
+
+ // Location of the listener
+ readonly attribute AudioParam positionX;
+ readonly attribute AudioParam positionY;
+ readonly attribute AudioParam positionZ;
+
+ // Forward direction vector of the listener
+ readonly attribute AudioParam forwardX;
+ readonly attribute AudioParam forwardY;
+ readonly attribute AudioParam forwardZ;
+
+ // Up direction vector for the listener
+ readonly attribute AudioParam upX;
+ readonly attribute AudioParam upY;
+ readonly attribute AudioParam upZ;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc
new file mode 100644
index 00000000000..32c8c6f03b1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.cc
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/instance_counters.h"
+#include "third_party/blink/renderer/platform/wtf/atomics.h"
+
+#if DEBUG_AUDIONODE_REFERENCES
+#include <stdio.h>
+#endif
+
+namespace blink {
+
+AudioHandler::AudioHandler(NodeType node_type,
+ AudioNode& node,
+ float sample_rate)
+ : is_initialized_(false),
+ node_type_(kNodeTypeUnknown),
+ node_(&node),
+ context_(node.context()),
+ last_processing_time_(-1),
+ last_non_silent_time_(0),
+ connection_ref_count_(0),
+ is_disabled_(false),
+ channel_count_(2) {
+ SetNodeType(node_type);
+ SetInternalChannelCountMode(kMax);
+ SetInternalChannelInterpretation(AudioBus::kSpeakers);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ if (!is_node_count_initialized_) {
+ is_node_count_initialized_ = true;
+ atexit(AudioHandler::PrintNodeCounts);
+ }
+#endif
+ InstanceCounters::IncrementCounter(InstanceCounters::kAudioHandlerCounter);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(
+ stderr,
+ "[%16p]: %16p: %2d: AudioHandler::AudioHandler() %d [%d] total: %u\n",
+ Context(), this, GetNodeType(), connection_ref_count_,
+ node_count_[GetNodeType()],
+ InstanceCounters::CounterValue(InstanceCounters::kAudioHandlerCounter));
+#endif
+}
+
+AudioHandler::~AudioHandler() {
+ DCHECK(IsMainThread());
+ // dispose() should be called.
+ DCHECK(!GetNode());
+ InstanceCounters::DecrementCounter(InstanceCounters::kAudioHandlerCounter);
+#if DEBUG_AUDIONODE_REFERENCES
+ --node_count_[GetNodeType()];
+ fprintf(
+ stderr,
+ "[%16p]: %16p: %2d: AudioHandler::~AudioHandler() %d [%d] remaining: "
+ "%u\n",
+ Context(), this, GetNodeType(), connection_ref_count_,
+ node_count_[GetNodeType()],
+ InstanceCounters::CounterValue(InstanceCounters::kAudioHandlerCounter));
+#endif
+}
+
+void AudioHandler::Initialize() {
+ DCHECK_EQ(new_channel_count_mode_, channel_count_mode_);
+ DCHECK_EQ(new_channel_interpretation_, channel_interpretation_);
+
+ is_initialized_ = true;
+}
+
+void AudioHandler::Uninitialize() {
+ is_initialized_ = false;
+}
+
+void AudioHandler::Dispose() {
+ DCHECK(IsMainThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ Context()->GetDeferredTaskHandler().RemoveChangedChannelCountMode(this);
+ Context()->GetDeferredTaskHandler().RemoveChangedChannelInterpretation(this);
+ Context()->GetDeferredTaskHandler().RemoveAutomaticPullNode(this);
+ for (auto& output : outputs_)
+ output->Dispose();
+ node_ = nullptr;
+}
+
+AudioNode* AudioHandler::GetNode() const {
+ DCHECK(IsMainThread());
+ return node_;
+}
+
+BaseAudioContext* AudioHandler::Context() const {
+ return context_;
+}
+
+String AudioHandler::NodeTypeName() const {
+ switch (node_type_) {
+ case kNodeTypeDestination:
+ return "AudioDestinationNode";
+ case kNodeTypeOscillator:
+ return "OscillatorNode";
+ case kNodeTypeAudioBufferSource:
+ return "AudioBufferSourceNode";
+ case kNodeTypeMediaElementAudioSource:
+ return "MediaElementAudioSourceNode";
+ case kNodeTypeMediaStreamAudioDestination:
+ return "MediaStreamAudioDestinationNode";
+ case kNodeTypeMediaStreamAudioSource:
+ return "MediaStreamAudioSourceNode";
+ case kNodeTypeScriptProcessor:
+ return "ScriptProcessorNode";
+ case kNodeTypeBiquadFilter:
+ return "BiquadFilterNode";
+ case kNodeTypePanner:
+ return "PannerNode";
+ case kNodeTypeStereoPanner:
+ return "StereoPannerNode";
+ case kNodeTypeConvolver:
+ return "ConvolverNode";
+ case kNodeTypeDelay:
+ return "DelayNode";
+ case kNodeTypeGain:
+ return "GainNode";
+ case kNodeTypeChannelSplitter:
+ return "ChannelSplitterNode";
+ case kNodeTypeChannelMerger:
+ return "ChannelMergerNode";
+ case kNodeTypeAnalyser:
+ return "AnalyserNode";
+ case kNodeTypeDynamicsCompressor:
+ return "DynamicsCompressorNode";
+ case kNodeTypeWaveShaper:
+ return "WaveShaperNode";
+ case kNodeTypeUnknown:
+ case kNodeTypeEnd:
+ default:
+ NOTREACHED();
+ return "UnknownNode";
+ }
+}
+
+void AudioHandler::SetNodeType(NodeType type) {
+ // Don't allow the node type to be changed to a different node type, after
+ // it's already been set. And the new type can't be unknown or end.
+ DCHECK_EQ(node_type_, kNodeTypeUnknown);
+ DCHECK_NE(type, kNodeTypeUnknown);
+ DCHECK_NE(type, kNodeTypeEnd);
+
+ node_type_ = type;
+
+#if DEBUG_AUDIONODE_REFERENCES
+ ++node_count_[type];
+ fprintf(stderr, "[%16p]: %16p: %2d: AudioHandler::AudioHandler [%3d]\n",
+ Context(), this, GetNodeType(), node_count_[GetNodeType()]);
+#endif
+}
+
+void AudioHandler::AddInput() {
+ inputs_.push_back(AudioNodeInput::Create(*this));
+}
+
+void AudioHandler::AddOutput(unsigned number_of_channels) {
+ DCHECK(IsMainThread());
+ outputs_.push_back(AudioNodeOutput::Create(this, number_of_channels));
+ GetNode()->DidAddOutput(NumberOfOutputs());
+}
+
+AudioNodeInput& AudioHandler::Input(unsigned i) {
+ return *inputs_[i];
+}
+
+AudioNodeOutput& AudioHandler::Output(unsigned i) {
+ return *outputs_[i];
+}
+
+unsigned long AudioHandler::ChannelCount() {
+ return channel_count_;
+}
+
+void AudioHandler::SetInternalChannelCountMode(ChannelCountMode mode) {
+ channel_count_mode_ = mode;
+ new_channel_count_mode_ = mode;
+}
+
+void AudioHandler::SetInternalChannelInterpretation(
+ AudioBus::ChannelInterpretation interpretation) {
+ channel_interpretation_ = interpretation;
+ new_channel_interpretation_ = interpretation;
+}
+
+void AudioHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ if (channel_count > 0 &&
+ channel_count <= BaseAudioContext::MaxNumberOfChannels()) {
+ if (channel_count_ != channel_count) {
+ channel_count_ = channel_count;
+ if (channel_count_mode_ != kMax)
+ UpdateChannelsForInputs();
+ }
+ } else {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned long>(
+ "channel count", channel_count, 1,
+ ExceptionMessages::kInclusiveBound,
+ BaseAudioContext::MaxNumberOfChannels(),
+ ExceptionMessages::kInclusiveBound));
+ }
+}
+
+String AudioHandler::GetChannelCountMode() {
+ // Because we delay the actual setting of the mode to the pre or post
+ // rendering phase, we want to return the value that was set, not the actual
+ // current mode.
+ switch (new_channel_count_mode_) {
+ case kMax:
+ return "max";
+ case kClampedMax:
+ return "clamped-max";
+ case kExplicit:
+ return "explicit";
+ }
+ NOTREACHED();
+ return "";
+}
+
+void AudioHandler::SetChannelCountMode(const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ ChannelCountMode old_mode = channel_count_mode_;
+
+ if (mode == "max") {
+ new_channel_count_mode_ = kMax;
+ } else if (mode == "clamped-max") {
+ new_channel_count_mode_ = kClampedMax;
+ } else if (mode == "explicit") {
+ new_channel_count_mode_ = kExplicit;
+ } else {
+ NOTREACHED();
+ }
+
+ if (new_channel_count_mode_ != old_mode)
+ Context()->GetDeferredTaskHandler().AddChangedChannelCountMode(this);
+}
+
+String AudioHandler::ChannelInterpretation() {
+  // Because we delay the actual setting of the interpretation to the pre or
+ // post rendering phase, we want to return the value that was set, not the
+ // actual current interpretation.
+ switch (new_channel_interpretation_) {
+ case AudioBus::kSpeakers:
+ return "speakers";
+ case AudioBus::kDiscrete:
+ return "discrete";
+ }
+ NOTREACHED();
+ return "";
+}
+
+void AudioHandler::SetChannelInterpretation(const String& interpretation,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ AudioBus::ChannelInterpretation old_mode = channel_interpretation_;
+
+ if (interpretation == "speakers") {
+ new_channel_interpretation_ = AudioBus::kSpeakers;
+ } else if (interpretation == "discrete") {
+ new_channel_interpretation_ = AudioBus::kDiscrete;
+ } else {
+ NOTREACHED();
+ }
+
+ if (new_channel_interpretation_ != old_mode)
+ Context()->GetDeferredTaskHandler().AddChangedChannelInterpretation(this);
+}
+
+void AudioHandler::UpdateChannelsForInputs() {
+ for (auto& input : inputs_)
+ input->ChangedOutputs();
+}
+
+void AudioHandler::ProcessIfNecessary(size_t frames_to_process) {
+ DCHECK(Context()->IsAudioThread());
+
+ if (!IsInitialized())
+ return;
+
+ // Ensure that we only process once per rendering quantum.
+ // This handles the "fanout" problem where an output is connected to multiple
+ // inputs. The first time we're called during this time slice we process, but
+ // after that we don't want to re-process, instead our output(s) will already
+ // have the results cached in their bus;
+ double current_time = Context()->currentTime();
+ if (last_processing_time_ != current_time) {
+ // important to first update this time because of feedback loops in the
+ // rendering graph.
+ last_processing_time_ = current_time;
+
+ PullInputs(frames_to_process);
+
+ bool silent_inputs = InputsAreSilent();
+ if (silent_inputs && PropagatesSilence()) {
+ SilenceOutputs();
+ // AudioParams still need to be processed so that the value can be updated
+ // if there are automations or so that the upstream nodes get pulled if
+ // any are connected to the AudioParam.
+ ProcessOnlyAudioParams(frames_to_process);
+ } else {
+ // Unsilence the outputs first because the processing of the node may
+ // cause the outputs to go silent and we want to propagate that hint to
+ // the downstream nodes. (For example, a Gain node with a gain of 0 will
+ // want to silence its output.)
+ UnsilenceOutputs();
+ Process(frames_to_process);
+ }
+
+ if (!silent_inputs) {
+      // Update |last_non_silent_time_| AFTER processing this block.
+      // Doing it before causes |PropagatesSilence()| to be one render
+      // quantum longer than necessary.
+ last_non_silent_time_ =
+ (Context()->CurrentSampleFrame() + frames_to_process) /
+ static_cast<double>(Context()->sampleRate());
+ }
+ }
+}
+
+void AudioHandler::CheckNumberOfChannelsForInput(AudioNodeInput* input) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ DCHECK(inputs_.Contains(input));
+ if (!inputs_.Contains(input))
+ return;
+
+ input->UpdateInternalBus();
+}
+
+bool AudioHandler::PropagatesSilence() const {
+ return last_non_silent_time_ + LatencyTime() + TailTime() <
+ Context()->currentTime();
+}
+
+void AudioHandler::PullInputs(size_t frames_to_process) {
+ DCHECK(Context()->IsAudioThread());
+
+ // Process all of the AudioNodes connected to our inputs.
+ for (auto& input : inputs_)
+ input->Pull(nullptr, frames_to_process);
+}
+
+bool AudioHandler::InputsAreSilent() {
+ for (auto& input : inputs_) {
+ if (!input->Bus()->IsSilent())
+ return false;
+ }
+ return true;
+}
+
+void AudioHandler::SilenceOutputs() {
+ for (auto& output : outputs_)
+ output->Bus()->Zero();
+}
+
+void AudioHandler::UnsilenceOutputs() {
+ for (auto& output : outputs_)
+ output->Bus()->ClearSilentFlag();
+}
+
+void AudioHandler::EnableOutputsIfNecessary() {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // We're enabling outputs for this handler. Remove this from the tail
+ // processing list (if it's there) so that we don't inadvertently disable the
+ // outputs later on when the tail processing time has elapsed.
+ Context()->GetDeferredTaskHandler().RemoveTailProcessingHandler(this, false);
+
+ if (is_disabled_ && connection_ref_count_ > 0) {
+ is_disabled_ = false;
+ for (auto& output : outputs_)
+ output->Enable();
+ }
+}
+
+void AudioHandler::DisableOutputsIfNecessary() {
+ // This function calls other functions that require graph ownership,
+ // so assert that this needs graph ownership too.
+ DCHECK(Context()->IsGraphOwner());
+
+ // Disable outputs if appropriate. We do this if the number of connections is
+ // 0 or 1. The case of 0 is from deref() where there are no connections left.
+ // The case of 1 is from AudioNodeInput::disable() where we want to disable
+ // outputs when there's only one connection left because we're ready to go
+ // away, but can't quite yet.
+ if (connection_ref_count_ <= 1 && !is_disabled_) {
+ // Still may have JavaScript references, but no more "active" connection
+ // references, so put all of our outputs in a "dormant" disabled state.
+ // Garbage collection may take a very long time after this time, so the
+ // "dormant" disabled nodes should not bog down the rendering...
+
+ // As far as JavaScript is concerned, our outputs must still appear to be
+ // connected. But internally our outputs should be disabled from the inputs
+ // they're connected to. disable() can recursively deref connections (and
+ // call disable()) down a whole chain of connected nodes.
+
+ // If a node requires tail processing, we defer the disabling of
+ // the outputs so that the tail for the node can be output.
+ // Otherwise, we can disable the outputs right away.
+ if (RequiresTailProcessing()) {
+ if (Context()->ContextState() !=
+ BaseAudioContext::AudioContextState::kClosed) {
+ Context()->GetDeferredTaskHandler().AddTailProcessingHandler(this);
+ }
+ } else {
+ DisableOutputs();
+ }
+ }
+}
+
+void AudioHandler::DisableOutputs() {
+ is_disabled_ = true;
+ for (auto& output : outputs_)
+ output->Disable();
+}
+
+void AudioHandler::MakeConnection() {
+ AtomicIncrement(&connection_ref_count_);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(
+ stderr,
+ "[%16p]: %16p: %2d: AudioHandler::MakeConnection %3d [%3d] @%.15g\n",
+ Context(), this, GetNodeType(), connection_ref_count_,
+ node_count_[GetNodeType()], Context()->currentTime());
+#endif
+
+ // See the disabling code in disableOutputsIfNecessary(). This handles
+ // the case where a node is being re-connected after being used at least
+ // once and disconnected. In this case, we need to re-enable.
+ EnableOutputsIfNecessary();
+}
+
+void AudioHandler::BreakConnection() {
+ // The actual work for deref happens completely within the audio context's
+ // graph lock. In the case of the audio thread, we must use a tryLock to
+ // avoid glitches.
+ bool has_lock = false;
+ if (Context()->IsAudioThread()) {
+ // Real-time audio thread must not contend lock (to avoid glitches).
+ has_lock = Context()->TryLock();
+ } else {
+ Context()->lock();
+ has_lock = true;
+ }
+
+ if (has_lock) {
+ BreakConnectionWithLock();
+ Context()->unlock();
+ } else {
+ // We were unable to get the lock, so put this in a list to finish up
+ // later.
+ DCHECK(Context()->IsAudioThread());
+ Context()->GetDeferredTaskHandler().AddDeferredBreakConnection(*this);
+ }
+}
+
+void AudioHandler::BreakConnectionWithLock() {
+ AtomicDecrement(&connection_ref_count_);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr,
+ "[%16p]: %16p: %2d: AudioHandler::BreakConnectionWitLock %3d [%3d] "
+ "@%.15g\n",
+ Context(), this, GetNodeType(), connection_ref_count_,
+ node_count_[GetNodeType()], Context()->currentTime());
+#endif
+
+ if (!connection_ref_count_)
+ DisableOutputsIfNecessary();
+}
+
+#if DEBUG_AUDIONODE_REFERENCES
+
+bool AudioHandler::is_node_count_initialized_ = false;
+int AudioHandler::node_count_[kNodeTypeEnd];
+
+void AudioHandler::PrintNodeCounts() {
+ fprintf(stderr, "\n\n");
+ fprintf(stderr, "===========================\n");
+ fprintf(stderr, "AudioNode: reference counts\n");
+ fprintf(stderr, "===========================\n");
+
+ for (unsigned i = 0; i < kNodeTypeEnd; ++i)
+ fprintf(stderr, "%2d: %d\n", i, node_count_[i]);
+
+ fprintf(stderr, "===========================\n\n\n");
+}
+
+#endif // DEBUG_AUDIONODE_REFERENCES
+
+#if DEBUG_AUDIONODE_REFERENCES > 1
+void AudioHandler::TailProcessingDebug(const char* note) {
+ fprintf(stderr, "[%16p]: %16p: %2d: %s %d @%.15g", Context(), this,
+ GetNodeType(), note, connection_ref_count_, Context()->currentTime());
+
+ // If we're on the audio thread, we can print out the tail and
+ // latency times (because these methods can only be called from the
+ // audio thread.)
+ if (Context()->IsAudioThread()) {
+ fprintf(stderr, ", tail=%.15g + %.15g, last=%.15g\n", TailTime(),
+ LatencyTime(), last_non_silent_time_);
+ }
+
+ fprintf(stderr, "\n");
+}
+
+void AudioHandler::AddTailProcessingDebug() {
+ TailProcessingDebug("addTail");
+}
+
+void AudioHandler::RemoveTailProcessingDebug() {
+ TailProcessingDebug("remTail");
+}
+#endif // DEBUG_AUDIONODE_REFERENCES > 1
+
+void AudioHandler::UpdateChannelCountMode() {
+ channel_count_mode_ = new_channel_count_mode_;
+ UpdateChannelsForInputs();
+}
+
+void AudioHandler::UpdateChannelInterpretation() {
+ channel_interpretation_ = new_channel_interpretation_;
+}
+
+unsigned AudioHandler::NumberOfOutputChannels() const {
+ // This should only be called for ScriptProcessorNodes which are the only
+ // nodes where you can have an output with 0 channels. All other nodes have
+ // have at least one output channel, so there's no reason other nodes should
+ // ever call this function.
+ DCHECK(0) << "numberOfOutputChannels() not valid for node type "
+ << GetNodeType();
+ return 1;
+}
+// ----------------------------------------------------------------
+
+AudioNode::AudioNode(BaseAudioContext& context)
+ : context_(context), handler_(nullptr) {}
+
+void AudioNode::Dispose() {
+ DCHECK(IsMainThread());
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: %16p: %2d: AudioNode::dispose %16p\n", context(),
+ this, Handler().GetNodeType(), handler_.get());
+#endif
+ BaseAudioContext::GraphAutoLocker locker(context());
+ Handler().Dispose();
+
+ if (context()->HasRealtimeConstraint()) {
+ // Add the handler to the orphan list if the context is not
+ // closed. (Nothing will clean up the orphan list if the context
+ // is closed.) These will get cleaned up in the post render task
+    // if audio thread is running or when the context is collected (in
+ // the worst case).
+ if (context()->ContextState() != BaseAudioContext::kClosed) {
+ context()->GetDeferredTaskHandler().AddRenderingOrphanHandler(
+ std::move(handler_));
+ }
+ } else {
+ // For an offline context, only need to save the handler when the
+ // context is running. The change in the context state is
+ // synchronous with the main thread (even though the offline
+ // thread is not synchronized to the main thread).
+ if (context()->ContextState() == BaseAudioContext::kRunning) {
+ context()->GetDeferredTaskHandler().AddRenderingOrphanHandler(
+ std::move(handler_));
+ }
+ }
+}
+
+void AudioNode::SetHandler(scoped_refptr<AudioHandler> handler) {
+ DCHECK(handler);
+ handler_ = std::move(handler);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: %16p: %2d: AudioNode::AudioNode %16p\n", context(),
+ this, handler_->GetNodeType(), handler_.get());
+#endif
+}
+
+AudioHandler& AudioNode::Handler() const {
+ return *handler_;
+}
+
+void AudioNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(context_);
+ visitor->Trace(connected_nodes_);
+ visitor->Trace(connected_params_);
+ EventTargetWithInlineData::Trace(visitor);
+}
+
+void AudioNode::HandleChannelOptions(const AudioNodeOptions& options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (options.hasChannelCount())
+ setChannelCount(options.channelCount(), exception_state);
+ if (options.hasChannelCountMode())
+ setChannelCountMode(options.channelCountMode(), exception_state);
+ if (options.hasChannelInterpretation())
+ setChannelInterpretation(options.channelInterpretation(), exception_state);
+}
+
+BaseAudioContext* AudioNode::context() const {
+ return context_;
+}
+
+AudioNode* AudioNode::connect(AudioNode* destination,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ if (context()->IsContextClosed()) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "Cannot connect after the context has been closed.");
+ return nullptr;
+ }
+
+ if (!destination) {
+ exception_state.ThrowDOMException(kSyntaxError,
+ "invalid destination node.");
+ return nullptr;
+ }
+
+ // Sanity check input and output indices.
+ if (output_index >= numberOfOutputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "output index (" + String::Number(output_index) +
+ ") exceeds number of outputs (" +
+ String::Number(numberOfOutputs()) + ").");
+ return nullptr;
+ }
+
+ if (destination && input_index >= destination->numberOfInputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "input index (" + String::Number(input_index) +
+ ") exceeds number of inputs (" +
+ String::Number(destination->numberOfInputs()) +
+ ").");
+ return nullptr;
+ }
+
+ if (context() != destination->context()) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ "cannot connect to a destination "
+ "belonging to a different audio context.");
+ return nullptr;
+ }
+
+ // ScriptProcessorNodes with 0 output channels can't be connected to any
+ // destination. If there are no output channels, what would the destination
+ // receive? Just disallow this.
+ if (Handler().GetNodeType() == AudioHandler::kNodeTypeScriptProcessor &&
+ Handler().NumberOfOutputChannels() == 0) {
+ exception_state.ThrowDOMException(kInvalidAccessError,
+ "cannot connect a ScriptProcessorNode "
+ "with 0 output channels to any "
+ "destination node.");
+ return nullptr;
+ }
+
+ destination->Handler()
+ .Input(input_index)
+ .Connect(Handler().Output(output_index));
+ if (!connected_nodes_[output_index])
+ connected_nodes_[output_index] = new HeapHashSet<Member<AudioNode>>();
+ connected_nodes_[output_index]->insert(destination);
+
+ // Let context know that a connection has been made.
+ context()->IncrementConnectionCount();
+
+ return destination;
+}
+
+void AudioNode::connect(AudioParam* param,
+ unsigned output_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ if (context()->IsContextClosed()) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "Cannot connect after the context has been closed.");
+ return;
+ }
+
+ if (!param) {
+ exception_state.ThrowDOMException(kSyntaxError, "invalid AudioParam.");
+ return;
+ }
+
+ if (output_index >= numberOfOutputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "output index (" + String::Number(output_index) +
+ ") exceeds number of outputs (" +
+ String::Number(numberOfOutputs()) + ").");
+ return;
+ }
+
+ if (context() != param->Context()) {
+ exception_state.ThrowDOMException(
+ kSyntaxError,
+ "cannot connect to an AudioParam "
+ "belonging to a different audio context.");
+ return;
+ }
+
+ param->Handler().Connect(Handler().Output(output_index));
+ if (!connected_params_[output_index])
+ connected_params_[output_index] = new HeapHashSet<Member<AudioParam>>();
+ connected_params_[output_index]->insert(param);
+}
+
+void AudioNode::DisconnectAllFromOutput(unsigned output_index) {
+ Handler().Output(output_index).DisconnectAll();
+ connected_nodes_[output_index] = nullptr;
+ connected_params_[output_index] = nullptr;
+}
+
+bool AudioNode::DisconnectFromOutputIfConnected(
+ unsigned output_index,
+ AudioNode& destination,
+ unsigned input_index_of_destination) {
+ AudioNodeOutput& output = Handler().Output(output_index);
+ AudioNodeInput& input =
+ destination.Handler().Input(input_index_of_destination);
+ if (!output.IsConnectedToInput(input))
+ return false;
+ output.DisconnectInput(input);
+ connected_nodes_[output_index]->erase(&destination);
+ return true;
+}
+
+bool AudioNode::DisconnectFromOutputIfConnected(unsigned output_index,
+ AudioParam& param) {
+ AudioNodeOutput& output = Handler().Output(output_index);
+ if (!output.IsConnectedToAudioParam(param.Handler()))
+ return false;
+ output.DisconnectAudioParam(param.Handler());
+ connected_params_[output_index]->erase(&param);
+ return true;
+}
+
+void AudioNode::disconnect() {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ // Disconnect all outgoing connections.
+ for (unsigned i = 0; i < numberOfOutputs(); ++i)
+ DisconnectAllFromOutput(i);
+}
+
+void AudioNode::disconnect(unsigned output_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ // Sanity check on the output index.
+ if (output_index >= numberOfOutputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "output index", output_index, 0u,
+ ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+ // Disconnect all outgoing connections from the given output.
+ DisconnectAllFromOutput(output_index);
+}
+
+void AudioNode::disconnect(AudioNode* destination,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ unsigned number_of_disconnections = 0;
+
+ // FIXME: Can this be optimized? ChannelSplitter and ChannelMerger can have
+ // 32 ports and that requires 1024 iterations to validate entire connections.
+ for (unsigned output_index = 0; output_index < numberOfOutputs();
+ ++output_index) {
+ for (unsigned input_index = 0;
+ input_index < destination->Handler().NumberOfInputs(); ++input_index) {
+ if (DisconnectFromOutputIfConnected(output_index, *destination,
+ input_index))
+ number_of_disconnections++;
+ }
+ }
+
+ // If there is no connection to the destination, throw an exception.
+ if (number_of_disconnections == 0) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError, "the given destination is not connected.");
+ return;
+ }
+}
+
+void AudioNode::disconnect(AudioNode* destination,
+ unsigned output_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ if (output_index >= numberOfOutputs()) {
+ // The output index is out of range. Throw an exception.
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "output index", output_index, 0u,
+ ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ // If the output index is valid, proceed to disconnect.
+ unsigned number_of_disconnections = 0;
+ // Sanity check on destination inputs and disconnect when possible.
+ for (unsigned input_index = 0; input_index < destination->numberOfInputs();
+ ++input_index) {
+ if (DisconnectFromOutputIfConnected(output_index, *destination,
+ input_index))
+ number_of_disconnections++;
+ }
+
+ // If there is no connection to the destination, throw an exception.
+ if (number_of_disconnections == 0) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ "output (" + String::Number(output_index) +
+ ") is not connected to the given destination.");
+ }
+}
+
+void AudioNode::disconnect(AudioNode* destination,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ if (output_index >= numberOfOutputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "output index", output_index, 0u,
+ ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ if (input_index >= destination->Handler().NumberOfInputs()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "input index", input_index, 0u, ExceptionMessages::kInclusiveBound,
+ destination->numberOfInputs() - 1,
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ // If both indices are valid, proceed to disconnect.
+ if (!DisconnectFromOutputIfConnected(output_index, *destination,
+ input_index)) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError, "output (" + String::Number(output_index) +
+ ") is not connected to the input (" +
+ String::Number(input_index) +
+ ") of the destination.");
+ return;
+ }
+}
+
+void AudioNode::disconnect(AudioParam* destination_param,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+  // The number of disconnections made.
+ unsigned number_of_disconnections = 0;
+
+  // Check if the node output is connected to the destination AudioParam.
+  // Disconnect if connected and increase |number_of_disconnections| by 1.
+ for (unsigned output_index = 0; output_index < Handler().NumberOfOutputs();
+ ++output_index) {
+ if (DisconnectFromOutputIfConnected(output_index, *destination_param))
+ number_of_disconnections++;
+ }
+
+ // Throw an exception when there is no valid connection to the destination.
+ if (number_of_disconnections == 0) {
+ exception_state.ThrowDOMException(kInvalidAccessError,
+ "the given AudioParam is not connected.");
+ return;
+ }
+}
+
+void AudioNode::disconnect(AudioParam* destination_param,
+ unsigned output_index,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ if (output_index >= Handler().NumberOfOutputs()) {
+ // The output index is out of range. Throw an exception.
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange(
+ "output index", output_index, 0u,
+ ExceptionMessages::kInclusiveBound, numberOfOutputs() - 1,
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ // If the output index is valid, proceed to disconnect.
+ if (!DisconnectFromOutputIfConnected(output_index, *destination_param)) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ "specified destination AudioParam and node output (" +
+ String::Number(output_index) + ") are not connected.");
+ return;
+ }
+}
+
+void AudioNode::DisconnectWithoutException(unsigned output_index) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(context());
+
+ // Sanity check input and output indices.
+ if (output_index >= Handler().NumberOfOutputs())
+ return;
+ DisconnectAllFromOutput(output_index);
+}
+
+unsigned AudioNode::numberOfInputs() const {
+ return Handler().NumberOfInputs();
+}
+
+unsigned AudioNode::numberOfOutputs() const {
+ return Handler().NumberOfOutputs();
+}
+
+unsigned long AudioNode::channelCount() const {
+ return Handler().ChannelCount();
+}
+
+void AudioNode::setChannelCount(unsigned long count,
+ ExceptionState& exception_state) {
+ Handler().SetChannelCount(count, exception_state);
+}
+
+String AudioNode::channelCountMode() const {
+ return Handler().GetChannelCountMode();
+}
+
+void AudioNode::setChannelCountMode(const String& mode,
+ ExceptionState& exception_state) {
+ Handler().SetChannelCountMode(mode, exception_state);
+}
+
+String AudioNode::channelInterpretation() const {
+ return Handler().ChannelInterpretation();
+}
+
+void AudioNode::setChannelInterpretation(const String& interpretation,
+ ExceptionState& exception_state) {
+ Handler().SetChannelInterpretation(interpretation, exception_state);
+}
+
+const AtomicString& AudioNode::InterfaceName() const {
+ return EventTargetNames::AudioNode;
+}
+
+ExecutionContext* AudioNode::GetExecutionContext() const {
+ return context()->GetExecutionContext();
+}
+
+void AudioNode::DidAddOutput(unsigned number_of_outputs) {
+ connected_nodes_.push_back(nullptr);
+ DCHECK_EQ(number_of_outputs, connected_nodes_.size());
+ connected_params_.push_back(nullptr);
+ DCHECK_EQ(number_of_outputs, connected_params_.size());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h
new file mode 100644
index 00000000000..6c69e2411d6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/event_target_modules.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/thread_safe_ref_counted.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+// Higher values produce more debugging output.
+#define DEBUG_AUDIONODE_REFERENCES 0
+
+namespace blink {
+
+class BaseAudioContext;
+class AudioNode;
+class AudioNodeOptions;
+class AudioNodeInput;
+class AudioNodeOutput;
+class AudioParam;
+class ExceptionState;
+
+// An AudioNode is the basic building block for handling audio within an
+// BaseAudioContext. It may be an audio source, an intermediate processing
+// module, or an audio destination. Each AudioNode can have inputs and/or
+// outputs. An AudioSourceNode has no inputs and a single output.
+// An AudioDestinationNode has one input and no outputs and represents the final
+// destination to the audio hardware. Most processing nodes such as filters
+// will have one input and one output, although multiple inputs and outputs are
+// possible.
+
+// Each of AudioNode objects owns its dedicated AudioHandler object. AudioNode
+// is responsible to provide IDL-accessible interface and its lifetime is
+// managed by Oilpan GC. AudioHandler is responsible for anything else. We must
+// not touch AudioNode objects in an audio rendering thread.
+
+// AudioHandler is created and owned by an AudioNode almost all the time. When
+// the AudioNode is about to die, the ownership of its AudioHandler is
+// transferred to DeferredTaskHandler, and it does deref the AudioHandler on the
+// main thread.
+//
+// Be careful to avoid reference cycles. If an AudioHandler has a reference
+// cycle including the owner AudioNode, objects in the cycle are never
+// collected.
+class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
+ public:
+ // Tag identifying the concrete handler subtype. kNodeTypeEnd must remain
+ // last: it sizes the node_count_[] debug array below.
+ enum NodeType {
+ kNodeTypeUnknown = 0,
+ kNodeTypeDestination = 1,
+ kNodeTypeOscillator = 2,
+ kNodeTypeAudioBufferSource = 3,
+ kNodeTypeMediaElementAudioSource = 4,
+ kNodeTypeMediaStreamAudioDestination = 5,
+ kNodeTypeMediaStreamAudioSource = 6,
+ kNodeTypeScriptProcessor = 7,
+ kNodeTypeBiquadFilter = 8,
+ kNodeTypePanner = 9,
+ kNodeTypeStereoPanner = 10,
+ kNodeTypeConvolver = 11,
+ kNodeTypeDelay = 12,
+ kNodeTypeGain = 13,
+ kNodeTypeChannelSplitter = 14,
+ kNodeTypeChannelMerger = 15,
+ kNodeTypeAnalyser = 16,
+ kNodeTypeDynamicsCompressor = 17,
+ kNodeTypeWaveShaper = 18,
+ kNodeTypeIIRFilter = 19,
+ kNodeTypeConstantSource = 20,
+ kNodeTypeAudioWorklet = 21,
+ kNodeTypeEnd = 22
+ };
+
+ AudioHandler(NodeType, AudioNode&, float sample_rate);
+ virtual ~AudioHandler();
+ // dispose() is called when the owner AudioNode is about to be
+ // destructed. This must be called in the main thread, and while the graph
+ // lock is held.
+ // Do not release resources used by an audio rendering thread in dispose().
+ virtual void Dispose();
+
+ // GetNode() returns a valid object until dispose() is called. This returns
+ // nullptr after dispose(). We must not call GetNode() in an audio rendering
+ // thread.
+ AudioNode* GetNode() const;
+ // context() returns a valid object until the BaseAudioContext dies, and
+ // returns nullptr otherwise. This always returns a valid object in an audio
+ // rendering thread, and inside dispose(). We must not call context() in the
+ // destructor.
+ virtual BaseAudioContext* Context() const;
+ void ClearContext() { context_ = nullptr; }
+
+ // Internal counterpart of the ChannelCountMode IDL enum
+ // ("max"/"clamped-max"/"explicit"); see audio_node.idl.
+ enum ChannelCountMode { kMax, kClampedMax, kExplicit };
+
+ NodeType GetNodeType() const { return node_type_; }
+ String NodeTypeName() const;
+
+ // This object has been connected to another object. This might have
+ // existing connections from others.
+ // This function must be called after acquiring a connection reference.
+ void MakeConnection();
+ // This object will be disconnected from another object. This might have
+ // remaining connections from others.
+ // This function must be called before releasing a connection reference.
+ void BreakConnection();
+
+ // Can be called from main thread or context's audio thread. It must be
+ // called while the context's graph lock is held.
+ void BreakConnectionWithLock();
+
+ // The AudioNodeInput(s) (if any) will already have their input data available
+ // when process() is called. Subclasses will take this input data and put the
+ // results in the AudioBus(s) of its AudioNodeOutput(s) (if any).
+ // Called from context's audio thread.
+ virtual void Process(size_t frames_to_process) = 0;
+
+ // Like process(), but only causes the automations to process; the
+ // normal processing of the node is bypassed. By default, we assume
+ // no AudioParams need to be updated.
+ virtual void ProcessOnlyAudioParams(size_t frames_to_process){};
+
+ // No significant resources should be allocated until initialize() is called.
+ // Processing may not occur until a node is initialized.
+ virtual void Initialize();
+ virtual void Uninitialize();
+
+ bool IsInitialized() const { return is_initialized_; }
+
+ unsigned NumberOfInputs() const { return inputs_.size(); }
+ unsigned NumberOfOutputs() const { return outputs_.size(); }
+
+ // Number of output channels. This only matters for ScriptProcessorNodes.
+ virtual unsigned NumberOfOutputChannels() const;
+
+ // The argument must be less than numberOfInputs().
+ AudioNodeInput& Input(unsigned);
+ // The argument must be less than numberOfOutputs().
+ AudioNodeOutput& Output(unsigned);
+
+ // processIfNecessary() is called by our output(s) when the rendering graph
+ // needs this AudioNode to process. This method ensures that the AudioNode
+ // will only process once per rendering time quantum even if it's called
+ // repeatedly. This handles the case of "fanout" where an output is connected
+ // to multiple AudioNode inputs. Called from context's audio thread.
+ void ProcessIfNecessary(size_t frames_to_process);
+
+ // Called when a new connection has been made to one of our inputs or the
+ // connection number of channels has changed. This potentially gives us
+ // enough information to perform a lazy initialization or, if necessary, a
+ // re-initialization. Called from main thread.
+ virtual void CheckNumberOfChannelsForInput(AudioNodeInput*);
+
+#if DEBUG_AUDIONODE_REFERENCES
+ static void PrintNodeCounts();
+#endif
+#if DEBUG_AUDIONODE_REFERENCES > 1
+ void TailProcessingDebug(const char* debug_note);
+ void AddTailProcessingDebug();
+ void RemoveTailProcessingDebug();
+#endif
+
+ // True if the node has a tail time or latency time that requires
+ // special tail processing to behave properly. Ideally, this can be
+ // checked using TailTime and LatencyTime, but these aren't
+ // available on the main thread, and the tail processing check can
+ // happen on the main thread.
+ virtual bool RequiresTailProcessing() const = 0;
+
+ // TailTime() is the length of time (not counting latency time) where
+ // non-zero output may occur after continuous silent input.
+ virtual double TailTime() const = 0;
+
+ // LatencyTime() is the length of time it takes for non-zero output to
+ // appear after non-zero input is provided. This only applies to processing
+ // delay which is an artifact of the processing algorithm chosen and is
+ // *not* part of the intrinsic desired effect. For example, a "delay" effect
+ // is expected to delay the signal, and thus would not be considered
+ // latency.
+ virtual double LatencyTime() const = 0;
+
+ // PropagatesSilence() should return true if the node will generate silent
+ // output when given silent input. By default, AudioNode will take TailTime()
+ // and LatencyTime() into account when determining whether the node will
+ // propagate silence.
+ virtual bool PropagatesSilence() const;
+ bool InputsAreSilent();
+ void SilenceOutputs();
+ void UnsilenceOutputs();
+
+ void EnableOutputsIfNecessary();
+ void DisableOutputsIfNecessary();
+ void DisableOutputs();
+
+ // Backing accessors for the channelCount/channelCountMode/
+ // channelInterpretation IDL attributes; the setters report failures through
+ // the given ExceptionState.
+ unsigned long ChannelCount();
+ virtual void SetChannelCount(unsigned long, ExceptionState&);
+
+ String GetChannelCountMode();
+ virtual void SetChannelCountMode(const String&, ExceptionState&);
+
+ String ChannelInterpretation();
+ virtual void SetChannelInterpretation(const String&, ExceptionState&);
+
+ ChannelCountMode InternalChannelCountMode() const {
+ return channel_count_mode_;
+ }
+ AudioBus::ChannelInterpretation InternalChannelInterpretation() const {
+ return channel_interpretation_;
+ }
+
+ void UpdateChannelCountMode();
+ void UpdateChannelInterpretation();
+
+ // Default callbackBufferSize should be the render quantum size
+ virtual size_t CallbackBufferSize() const {
+ return AudioUtilities::kRenderQuantumFrames;
+ }
+
+ protected:
+ // Inputs and outputs must be created before the AudioHandler is
+ // initialized.
+ void AddInput();
+ void AddOutput(unsigned number_of_channels);
+
+ // Called by processIfNecessary() to cause all parts of the rendering graph
+ // connected to us to process. Each rendering quantum, the audio data for
+ // each of the AudioNode's inputs will be available after this method is
+ // called. Called from context's audio thread.
+ virtual void PullInputs(size_t frames_to_process);
+
+ // Force all inputs to take any channel interpretation changes into account.
+ void UpdateChannelsForInputs();
+
+ private:
+ void SetNodeType(NodeType);
+
+ // NOTE(review): volatile is not a cross-thread ordering primitive; these
+ // flags presumably rely on the graph lock for ordering -- confirm against
+ // upstream before treating them as synchronization.
+ volatile bool is_initialized_;
+ NodeType node_type_;
+
+ // The owner AudioNode. This untraced member is safe because dispose() is
+ // called before the AudioNode death, and it clears |node_|. Do not access
+ // |node_| directly, use GetNode() instead.
+ // See http://crbug.com/404527 for the detail.
+ UntracedMember<AudioNode> node_;
+
+ // This untraced member is safe because this is cleared for all of live
+ // AudioHandlers when the BaseAudioContext dies. Do not access m_context
+ // directly, use context() instead.
+ // See http://crbug.com/404527 for the detail.
+ UntracedMember<BaseAudioContext> context_;
+
+ Vector<std::unique_ptr<AudioNodeInput>> inputs_;
+ Vector<std::unique_ptr<AudioNodeOutput>> outputs_;
+
+ double last_processing_time_;
+ double last_non_silent_time_;
+
+ volatile int connection_ref_count_;
+
+ bool is_disabled_;
+
+#if DEBUG_AUDIONODE_REFERENCES
+ static bool is_node_count_initialized_;
+ static int node_count_[kNodeTypeEnd];
+#endif
+
+ ChannelCountMode channel_count_mode_;
+ AudioBus::ChannelInterpretation channel_interpretation_;
+
+ protected:
+ // Set the (internal) channelCountMode and channelInterpretation
+ // accordingly. Use this in the node constructors to set the internal state
+ // correctly if the node uses values different from the defaults.
+ void SetInternalChannelCountMode(ChannelCountMode);
+ void SetInternalChannelInterpretation(AudioBus::ChannelInterpretation);
+
+ unsigned channel_count_;
+ // The new channel count mode that will be used to set the actual mode in the
+ // pre or post rendering phase.
+ ChannelCountMode new_channel_count_mode_;
+ // The new channel interpretation that will be used to set the actual
+ // interpretation in the pre or post rendering phase.
+ AudioBus::ChannelInterpretation new_channel_interpretation_;
+};
+
+class MODULES_EXPORT AudioNode : public EventTargetWithInlineData {
+ DEFINE_WRAPPERTYPEINFO();
+ USING_PRE_FINALIZER(AudioNode, Dispose);
+
+ public:
+ // Traces context_ and the connected_nodes_/connected_params_ graphs below.
+ virtual void Trace(blink::Visitor*);
+ AudioHandler& Handler() const;
+
+ // Applies the channel-related members of AudioNodeOptions to this node;
+ // failures are reported through the ExceptionState.
+ void HandleChannelOptions(const AudioNodeOptions&, ExceptionState&);
+
+ // connect()/disconnect() overload set backing the AudioNode IDL interface
+ // (see audio_node.idl for the attribute/overload mapping).
+ virtual AudioNode* connect(AudioNode*,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState&);
+ void connect(AudioParam*, unsigned output_index, ExceptionState&);
+ void disconnect();
+ virtual void disconnect(unsigned output_index, ExceptionState&);
+ void disconnect(AudioNode*, ExceptionState&);
+ void disconnect(AudioNode*, unsigned output_index, ExceptionState&);
+ void disconnect(AudioNode*,
+ unsigned output_index,
+ unsigned input_index,
+ ExceptionState&);
+ void disconnect(AudioParam*, ExceptionState&);
+ void disconnect(AudioParam*, unsigned output_index, ExceptionState&);
+ BaseAudioContext* context() const;
+ unsigned numberOfInputs() const;
+ unsigned numberOfOutputs() const;
+ unsigned long channelCount() const;
+ void setChannelCount(unsigned long, ExceptionState&);
+ String channelCountMode() const;
+ void setChannelCountMode(const String&, ExceptionState&);
+ String channelInterpretation() const;
+ void setChannelInterpretation(const String&, ExceptionState&);
+
+ // EventTarget
+ const AtomicString& InterfaceName() const final;
+ ExecutionContext* GetExecutionContext() const final;
+
+ // Called inside AudioHandler constructors.
+ void DidAddOutput(unsigned number_of_outputs);
+ // Like disconnect, but no exception is thrown if the outputIndex is invalid.
+ // Just do nothing in that case.
+ void DisconnectWithoutException(unsigned output_index);
+
+ protected:
+ explicit AudioNode(BaseAudioContext&);
+ // This should be called in a constructor.
+ void SetHandler(scoped_refptr<AudioHandler>);
+
+ private:
+ // Pre-finalizer (see USING_PRE_FINALIZER above): runs before this node is
+ // garbage collected.
+ void Dispose();
+ void DisconnectAllFromOutput(unsigned output_index);
+ // Returns true if the specified AudioNodeInput was connected.
+ bool DisconnectFromOutputIfConnected(unsigned output_index,
+ AudioNode& destination,
+ unsigned input_index_of_destination);
+ // Returns true if the specified AudioParam was connected.
+ bool DisconnectFromOutputIfConnected(unsigned output_index, AudioParam&);
+
+ Member<BaseAudioContext> context_;
+ scoped_refptr<AudioHandler> handler_;
+ // Represents audio node graph with Oilpan references. N-th HeapHashSet
+ // represents a set of AudioNode objects connected to this AudioNode's N-th
+ // output.
+ HeapVector<Member<HeapHashSet<Member<AudioNode>>>> connected_nodes_;
+ // Represents audio node graph with Oilpan references. N-th HeapHashSet
+ // represents a set of AudioParam objects connected to this AudioNode's N-th
+ // output.
+ HeapVector<Member<HeapHashSet<Member<AudioParam>>>> connected_params_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.idl
new file mode 100644
index 00000000000..3bf9da4be29
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node.idl
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#audionode
+
+// Mirrors AudioHandler::ChannelCountMode (kMax/kClampedMax/kExplicit).
+enum ChannelCountMode {
+ "max",
+ "clamped-max",
+ "explicit"
+};
+
+// Mirrors AudioBus::ChannelInterpretation used by the C++ handlers.
+enum ChannelInterpretation {
+ "speakers",
+ "discrete"
+};
+
+// Implemented by blink::AudioNode (audio_node.h); each overload below maps
+// onto a corresponding C++ connect()/disconnect() overload.
+interface AudioNode : EventTarget {
+ [RaisesException, MeasureAs=AudioNodeConnectToAudioNode] AudioNode connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
+ [RaisesException, MeasureAs=AudioNodeConnectToAudioParam] void connect(AudioParam destination, optional unsigned long output = 0);
+ void disconnect();
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioNode] void disconnect(unsigned long output);
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioNode] void disconnect(AudioNode destination);
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioNode] void disconnect(AudioNode destination, unsigned long output);
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioNode] void disconnect(AudioNode destination, unsigned long output, unsigned long input);
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioParam] void disconnect(AudioParam destination);
+ [RaisesException, MeasureAs=AudioNodeDisconnectFromAudioParam] void disconnect(AudioParam destination, unsigned long output);
+ readonly attribute BaseAudioContext context;
+ readonly attribute unsigned long numberOfInputs;
+ readonly attribute unsigned long numberOfOutputs;
+ [RaisesException=Setter] attribute unsigned long channelCount;
+ [RaisesException=Setter] attribute ChannelCountMode channelCountMode;
+ [RaisesException=Setter] attribute ChannelInterpretation channelInterpretation;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc
new file mode 100644
index 00000000000..e2124a84b7c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+
+namespace blink {
+
+// Private: instances are created via Create(). Starts with a mono summing
+// bus; UpdateInternalBus() recreates it once the channel count changes.
+inline AudioNodeInput::AudioNodeInput(AudioHandler& handler)
+ : AudioSummingJunction(handler.Context()->GetDeferredTaskHandler()),
+ handler_(handler) {
+ // Set to mono by default.
+ internal_summing_bus_ =
+ AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames);
+}
+
+// Factory; the owning AudioHandler keeps the returned input alive (see the
+// handler_ comment in audio_node_input.h).
+std::unique_ptr<AudioNodeInput> AudioNodeInput::Create(AudioHandler& handler) {
+ return base::WrapUnique(new AudioNodeInput(handler));
+}
+
+// Registers |output| as an active connection (no-op if already connected) and
+// registers this input with the output via AddInput().
+void AudioNodeInput::Connect(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ // Check if we're already connected to this output.
+ if (outputs_.Contains(&output))
+ return;
+
+ output.AddInput(*this);
+ outputs_.insert(&output);
+ ChangedOutputs();
+}
+
+// Removes |output| from the active list or, failing that, the disabled list.
+// Disconnecting an output that is in neither list is a caller bug
+// (NOTREACHED).
+void AudioNodeInput::Disconnect(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ // First try to disconnect from "active" connections.
+ if (outputs_.Contains(&output)) {
+ outputs_.erase(&output);
+ ChangedOutputs();
+ output.RemoveInput(*this);
+ // Note: it's important to return immediately after removeInput() calls
+ // since the node may be deleted.
+ return;
+ }
+
+ // Otherwise, try to disconnect from disabled connections.
+ if (disabled_outputs_.Contains(&output)) {
+ disabled_outputs_.erase(&output);
+ output.RemoveInput(*this);
+ // Note: it's important to return immediately after all removeInput() calls
+ // since the node may be deleted.
+ return;
+ }
+
+ NOTREACHED();
+}
+
+// Moves |output| from the active list to the disabled list and lets the
+// handler propagate the disabled state to its own outputs.
+void AudioNodeInput::Disable(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ DCHECK(outputs_.Contains(&output));
+
+ disabled_outputs_.insert(&output);
+ outputs_.erase(&output);
+ ChangedOutputs();
+
+ // Propagate disabled state to outputs.
+ Handler().DisableOutputsIfNecessary();
+}
+
+// Moves |output| back from the disabled list to the active list and lets the
+// handler propagate the enabled state to its own outputs.
+void AudioNodeInput::Enable(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ // Move output from disabled list to active list.
+ outputs_.insert(&output);
+ if (disabled_outputs_.size() > 0) {
+ DCHECK(disabled_outputs_.Contains(&output));
+ disabled_outputs_.erase(&output);
+ }
+ ChangedOutputs();
+
+ // Propagate enabled state to outputs.
+ Handler().EnableOutputsIfNecessary();
+}
+
+// AudioSummingJunction hook: asks the handler to re-check this input's
+// channel count after the connection set changed.
+void AudioNodeInput::DidUpdate() {
+ Handler().CheckNumberOfChannelsForInput(this);
+}
+
+// Recreates the internal summing bus, but only when the computed channel
+// count actually differs from the current bus.
+void AudioNodeInput::UpdateInternalBus() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ unsigned number_of_input_channels = NumberOfChannels();
+
+ if (number_of_input_channels == internal_summing_bus_->NumberOfChannels())
+ return;
+
+ internal_summing_bus_ = AudioBus::Create(
+ number_of_input_channels, AudioUtilities::kRenderQuantumFrames);
+}
+
+// kExplicit uses the handler's channelCount directly; otherwise the result is
+// the maximum over connected outputs (at least 1), clamped to channelCount
+// for kClampedMax.
+unsigned AudioNodeInput::NumberOfChannels() const {
+ AudioHandler::ChannelCountMode mode = Handler().InternalChannelCountMode();
+ if (mode == AudioHandler::kExplicit)
+ return Handler().ChannelCount();
+
+ // Find the number of channels of the connection with the largest number of
+ // channels.
+ unsigned max_channels = 1; // one channel is the minimum allowed
+
+ for (AudioNodeOutput* output : outputs_) {
+ // Use output()->numberOfChannels() instead of
+ // output->bus()->numberOfChannels(), because the calling of
+ // AudioNodeOutput::bus() is not safe here.
+ max_channels = std::max(max_channels, output->NumberOfChannels());
+ }
+
+ if (mode == AudioHandler::kClampedMax)
+ max_channels =
+ std::min(max_channels, static_cast<unsigned>(Handler().ChannelCount()));
+
+ return max_channels;
+}
+
+// Returns the bus holding this input's rendered audio for the current
+// quantum: the single connection's own bus in the in-place case, otherwise
+// the internal summing bus.
+AudioBus* AudioNodeInput::Bus() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+
+ // Handle single connection specially to allow for in-place processing.
+ if (NumberOfRenderingConnections() == 1 &&
+ Handler().InternalChannelCountMode() == AudioHandler::kMax)
+ return RenderingOutput(0)->Bus();
+
+ // Multiple connections case or complex ChannelCountMode (or no connections).
+ return InternalSummingBus();
+}
+
+// Audio-thread accessor for the owned summing bus.
+AudioBus* AudioNodeInput::InternalSummingBus() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+
+ return internal_summing_bus_.get();
+}
+
+// Zeroes |summing_bus|, then pulls every rendering connection and mixes it in
+// at unity gain using the handler's channel interpretation.
+void AudioNodeInput::SumAllConnections(AudioBus* summing_bus,
+ size_t frames_to_process) {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+
+ // We shouldn't be calling this method if there's only one connection, since
+ // it's less efficient.
+ // DCHECK(numberOfRenderingConnections() > 1 ||
+ // handler().internalChannelCountMode() != AudioHandler::Max);
+
+ DCHECK(summing_bus);
+ if (!summing_bus)
+ return;
+
+ summing_bus->Zero();
+
+ AudioBus::ChannelInterpretation interpretation =
+ Handler().InternalChannelInterpretation();
+
+ for (unsigned i = 0; i < NumberOfRenderingConnections(); ++i) {
+ AudioNodeOutput* output = RenderingOutput(i);
+ DCHECK(output);
+
+ // Render audio from this output.
+ AudioBus* connection_bus = output->Pull(nullptr, frames_to_process);
+
+ // Sum, with unity-gain.
+ summing_bus->SumFrom(*connection_bus, interpretation);
+ }
+}
+
+// See the declaration comment: a single kMax connection is pulled in place;
+// zero connections yield silence; otherwise all connections are summed into
+// the internal bus.
+AudioBus* AudioNodeInput::Pull(AudioBus* in_place_bus,
+ size_t frames_to_process) {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+
+ // Handle single connection case.
+ if (NumberOfRenderingConnections() == 1 &&
+ Handler().InternalChannelCountMode() == AudioHandler::kMax) {
+ // The output will optimize processing using inPlaceBus if it's able.
+ AudioNodeOutput* output = this->RenderingOutput(0);
+ return output->Pull(in_place_bus, frames_to_process);
+ }
+
+ AudioBus* internal_summing_bus = this->InternalSummingBus();
+
+ if (!NumberOfRenderingConnections()) {
+ // At least, generate silence if we're not connected to anything.
+ // FIXME: if we wanted to get fancy, we could propagate a 'silent hint' here
+ // to optimize the downstream graph processing.
+ internal_summing_bus->Zero();
+ return internal_summing_bus;
+ }
+
+ // Handle multiple connections case.
+ SumAllConnections(internal_summing_bus, frames_to_process);
+
+ return internal_summing_bus;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h
new file mode 100644
index 00000000000..85b5048bcb8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_input.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_INPUT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_INPUT_H_
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_summing_junction.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+
+namespace blink {
+
+class AudioNodeOutput;
+
+// An AudioNodeInput represents an input to an AudioNode and can be connected
+// from one or more AudioNodeOutputs. In the case of multiple connections, the
+// input will act as a unity-gain summing junction, mixing all the outputs. The
+// number of channels of the input's bus is the maximum of the number of
+// channels of all its connections.
+
+class AudioNodeInput final : public AudioSummingJunction {
+ USING_FAST_MALLOC(AudioNodeInput);
+
+ public:
+ static std::unique_ptr<AudioNodeInput> Create(AudioHandler&);
+
+ // AudioSummingJunction
+ void DidUpdate() override;
+
+ // Can be called from any thread.
+ AudioHandler& Handler() const { return handler_; }
+
+ // Must be called with the context's graph lock.
+ void Connect(AudioNodeOutput&);
+ void Disconnect(AudioNodeOutput&);
+
+ // disable() will take the output out of the active connections list and set
+ // aside in a disabled list.
+ // enable() will put the output back into the active connections list.
+ // Must be called with the context's graph lock.
+ void Enable(AudioNodeOutput&);
+ void Disable(AudioNodeOutput&);
+
+ // pull() processes all of the AudioNodes connected to us.
+ // In the case of multiple connections it sums the result into an internal
+ // summing bus. In the single connection case, it allows in-place processing
+ // where possible using inPlaceBus. It returns the bus which it rendered
+ // into, returning inPlaceBus if in-place processing was performed.
+ // Called from context's audio thread.
+ AudioBus* Pull(AudioBus* in_place_bus, size_t frames_to_process);
+
+ // bus() contains the rendered audio after pull() has been called for each
+ // time quantum.
+ // Called from context's audio thread.
+ AudioBus* Bus();
+
+ // updateInternalBus() updates internal_summing_bus_ appropriately for the
+ // number of channels. This must be called when we own the context's graph
+ // lock in the audio thread at the very start or end of the render quantum.
+ void UpdateInternalBus();
+
+ // The number of channels of the connection with the largest number of
+ // channels.
+ unsigned NumberOfChannels() const;
+
+ private:
+ explicit AudioNodeInput(AudioHandler&);
+
+ // This reference is safe because the AudioHandler owns this AudioNodeInput
+ // object.
+ AudioHandler& handler_;
+
+ // disabled_outputs_ contains the AudioNodeOutputs which are disabled (will
+ // not be processed) by the audio graph rendering. But, from JavaScript's
+ // perspective, these outputs are still connected to us.
+ // Generally, these represent disabled connections from "notes" which have
+ // finished playing but are not yet garbage collected.
+ // These raw pointers are safe. Owner AudioNodes of these AudioNodeOutputs
+ // manage their lifetime, and AudioNode::dispose() disconnects all of
+ // connections.
+ HashSet<AudioNodeOutput*> disabled_outputs_;
+
+ // Called from context's audio thread.
+ AudioBus* InternalSummingBus();
+ void SumAllConnections(AudioBus* summing_bus, size_t frames_to_process);
+
+ // Mono until UpdateInternalBus() grows it to the computed channel count.
+ scoped_refptr<AudioBus> internal_summing_bus_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_INPUT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_options.idl
new file mode 100644
index 00000000000..f14f9ed43db
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_options.idl
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-audionodeoptions
+dictionary AudioNodeOptions {
+ unsigned long channelCount;
+ ChannelCountMode channelCountMode;
+ ChannelInterpretation channelInterpretation;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc
new file mode 100644
index 00000000000..d3084c44bb6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.cc
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+inline AudioNodeOutput::AudioNodeOutput(AudioHandler* handler,
+ unsigned number_of_channels)
+ : handler_(*handler),
+ number_of_channels_(number_of_channels),
+ desired_number_of_channels_(number_of_channels),
+ is_in_place_(false),
+ is_enabled_(true),
+ did_call_dispose_(false),
+ rendering_fan_out_count_(0),
+ rendering_param_fan_out_count_(0) {
+ DCHECK_LE(number_of_channels, BaseAudioContext::MaxNumberOfChannels());
+
+ internal_bus_ = AudioBus::Create(number_of_channels,
+ AudioUtilities::kRenderQuantumFrames);
+}
+
+std::unique_ptr<AudioNodeOutput> AudioNodeOutput::Create(
+ AudioHandler* handler,
+ unsigned number_of_channels) {
+ return base::WrapUnique(new AudioNodeOutput(handler, number_of_channels));
+}
+
+void AudioNodeOutput::Dispose() {
+ did_call_dispose_ = true;
+
+ GetDeferredTaskHandler().RemoveMarkedAudioNodeOutput(this);
+ DisconnectAll();
+ DCHECK(inputs_.IsEmpty());
+ DCHECK(params_.IsEmpty());
+}
+
+void AudioNodeOutput::SetNumberOfChannels(unsigned number_of_channels) {
+ DCHECK_LE(number_of_channels, BaseAudioContext::MaxNumberOfChannels());
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ desired_number_of_channels_ = number_of_channels;
+
+ if (GetDeferredTaskHandler().IsAudioThread()) {
+ // If we're in the audio thread then we can take care of it right away (we
+ // should be at the very start or end of a rendering quantum).
+ UpdateNumberOfChannels();
+ } else {
+ DCHECK(!did_call_dispose_);
+ // Let the context take care of it in the audio thread in the pre and post
+ // render tasks.
+ GetDeferredTaskHandler().MarkAudioNodeOutputDirty(this);
+ }
+}
+
+void AudioNodeOutput::UpdateInternalBus() {
+ if (NumberOfChannels() == internal_bus_->NumberOfChannels())
+ return;
+
+ internal_bus_ = AudioBus::Create(NumberOfChannels(),
+ AudioUtilities::kRenderQuantumFrames);
+}
+
+void AudioNodeOutput::UpdateRenderingState() {
+ UpdateNumberOfChannels();
+ rendering_fan_out_count_ = FanOutCount();
+ rendering_param_fan_out_count_ = ParamFanOutCount();
+}
+
+void AudioNodeOutput::UpdateNumberOfChannels() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (number_of_channels_ != desired_number_of_channels_) {
+ number_of_channels_ = desired_number_of_channels_;
+ UpdateInternalBus();
+ PropagateChannelCount();
+ }
+}
+
+void AudioNodeOutput::PropagateChannelCount() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (IsChannelCountKnown()) {
+ // Announce to any nodes we're connected to that we changed our channel
+ // count for its input.
+ for (AudioNodeInput* i : inputs_)
+ i->Handler().CheckNumberOfChannelsForInput(i);
+ }
+}
+
+AudioBus* AudioNodeOutput::Pull(AudioBus* in_place_bus,
+ size_t frames_to_process) {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ DCHECK(rendering_fan_out_count_ > 0 || rendering_param_fan_out_count_ > 0);
+
+ // Causes our AudioNode to process if it hasn't already for this render
+ // quantum. We try to do in-place processing (using inPlaceBus) if at all
+ // possible, but we can't process in-place if we're connected to more than one
+ // input (fan-out > 1). In this case pull() is called multiple times per
+ // rendering quantum, and the processIfNecessary() call below will cause our
+ // node to process() only the first time, caching the output in
+ // m_internalOutputBus for subsequent calls.
+
+ is_in_place_ =
+ in_place_bus && in_place_bus->NumberOfChannels() == NumberOfChannels() &&
+ (rendering_fan_out_count_ + rendering_param_fan_out_count_) == 1;
+
+ in_place_bus_ = is_in_place_ ? in_place_bus : nullptr;
+
+ Handler().ProcessIfNecessary(frames_to_process);
+ return Bus();
+}
+
+AudioBus* AudioNodeOutput::Bus() const {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ return is_in_place_ ? in_place_bus_.get() : internal_bus_.get();
+}
+
+unsigned AudioNodeOutput::FanOutCount() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ return inputs_.size();
+}
+
+unsigned AudioNodeOutput::ParamFanOutCount() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ return params_.size();
+}
+
+unsigned AudioNodeOutput::RenderingFanOutCount() const {
+ return rendering_fan_out_count_;
+}
+
+void AudioNodeOutput::AddInput(AudioNodeInput& input) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ inputs_.insert(&input);
+ input.Handler().MakeConnection();
+}
+
+void AudioNodeOutput::RemoveInput(AudioNodeInput& input) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ input.Handler().BreakConnection();
+ inputs_.erase(&input);
+}
+
+void AudioNodeOutput::DisconnectAllInputs() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ // AudioNodeInput::disconnect() changes m_inputs by calling removeInput().
+ while (!inputs_.IsEmpty())
+ (*inputs_.begin())->Disconnect(*this);
+}
+
+void AudioNodeOutput::DisconnectInput(AudioNodeInput& input) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ DCHECK(IsConnectedToInput(input));
+ input.Disconnect(*this);
+}
+
+void AudioNodeOutput::DisconnectAudioParam(AudioParamHandler& param) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ DCHECK(IsConnectedToAudioParam(param));
+ param.Disconnect(*this);
+}
+
+void AudioNodeOutput::AddParam(AudioParamHandler& param) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ params_.insert(&param);
+}
+
+void AudioNodeOutput::RemoveParam(AudioParamHandler& param) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ params_.erase(&param);
+}
+
+void AudioNodeOutput::DisconnectAllParams() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ // AudioParam::disconnect() changes m_params by calling removeParam().
+ while (!params_.IsEmpty())
+ (*params_.begin())->Disconnect(*this);
+}
+
+void AudioNodeOutput::DisconnectAll() {
+ DisconnectAllInputs();
+ DisconnectAllParams();
+}
+
+bool AudioNodeOutput::IsConnectedToInput(AudioNodeInput& input) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ return inputs_.Contains(&input);
+}
+
+bool AudioNodeOutput::IsConnectedToAudioParam(AudioParamHandler& param) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ return params_.Contains(&param);
+}
+
+void AudioNodeOutput::Disable() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (is_enabled_) {
+ is_enabled_ = false;
+ for (AudioNodeInput* i : inputs_)
+ i->Disable(*this);
+ }
+}
+
+void AudioNodeOutput::Enable() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (!is_enabled_) {
+ is_enabled_ = true;
+ for (AudioNodeInput* i : inputs_)
+ i->Enable(*this);
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h
new file mode 100644
index 00000000000..dd76a98dd6c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_node_output.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_OUTPUT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_OUTPUT_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+
+namespace blink {
+
+class AudioNodeInput;
+
+// AudioNodeOutput represents a single output for an AudioNode.
+// It may be connected to one or more AudioNodeInputs.
+class AudioNodeOutput final {
+ USING_FAST_MALLOC(AudioNodeOutput);
+
+ public:
+ // It's OK to pass 0 for numberOfChannels in which case
+ // setNumberOfChannels() must be called later on.
+ static std::unique_ptr<AudioNodeOutput> Create(AudioHandler*,
+ unsigned number_of_channels);
+ void Dispose();
+
+ // Causes our AudioNode to process if it hasn't already for this render
+ // quantum. It returns the bus containing the processed audio for this
+ // output, returning inPlaceBus if in-place processing was possible. Called
+ // from context's audio thread.
+ AudioBus* Pull(AudioBus* in_place_bus, size_t frames_to_process);
+
+ // bus() will contain the rendered audio after pull() is called for each
+ // rendering time quantum.
+ // Called from context's audio thread.
+ AudioBus* Bus() const;
+
+ // renderingFanOutCount() is the number of AudioNodeInputs that we're
+ // connected to during rendering. Unlike fanOutCount() it will not change
+ // during the course of a render quantum.
+ unsigned RenderingFanOutCount() const;
+
+ // Must be called with the context's graph lock.
+ void DisconnectAll();
+
+ // Disconnect a specific input or AudioParam.
+ void DisconnectInput(AudioNodeInput&);
+ void DisconnectAudioParam(AudioParamHandler&);
+
+ void SetNumberOfChannels(unsigned);
+ unsigned NumberOfChannels() const { return number_of_channels_; }
+ bool IsChannelCountKnown() const { return NumberOfChannels() > 0; }
+
+ bool IsConnected() { return FanOutCount() > 0 || ParamFanOutCount() > 0; }
+
+ // Probe if the output node is connected with a certain input or AudioParam
+ bool IsConnectedToInput(AudioNodeInput&);
+ bool IsConnectedToAudioParam(AudioParamHandler&);
+
+ // Disable/Enable happens when there are still JavaScript references to a
+ // node, but it has otherwise "finished" its work. For example, when a note
+ // has finished playing. It is kept around, because it may be played again at
+ // a later time. They must be called with the context's graph lock.
+ void Disable();
+ void Enable();
+
+ // updateRenderingState() is called in the audio thread at the start or end of
+ // the render quantum to handle any recent changes to the graph state.
+ // It must be called with the context's graph lock.
+ void UpdateRenderingState();
+
+ private:
+ AudioNodeOutput(AudioHandler*, unsigned number_of_channels);
+ // Can be called from any thread.
+ AudioHandler& Handler() const { return handler_; }
+ DeferredTaskHandler& GetDeferredTaskHandler() const {
+ return handler_.Context()->GetDeferredTaskHandler();
+ }
+
+ // This reference is safe because the AudioHandler owns this AudioNodeOutput
+ // object.
+ AudioHandler& handler_;
+
+ friend class AudioNodeInput;
+ friend class AudioParamHandler;
+
+ // These are called from AudioNodeInput.
+ // They must be called with the context's graph lock.
+ void AddInput(AudioNodeInput&);
+ void RemoveInput(AudioNodeInput&);
+ void AddParam(AudioParamHandler&);
+ void RemoveParam(AudioParamHandler&);
+
+ // fanOutCount() is the number of AudioNodeInputs that we're connected to.
+ // This method should not be called in audio thread rendering code, instead
+ // renderingFanOutCount() should be used.
+ // It must be called with the context's graph lock.
+ unsigned FanOutCount();
+
+ // Similar to fanOutCount(), paramFanOutCount() is the number of AudioParams
+ // that we're connected to. This method should not be called in audio thread
+ // rendering code, instead renderingParamFanOutCount() should be used.
+ // It must be called with the context's graph lock.
+ unsigned ParamFanOutCount();
+
+ // Must be called with the context's graph lock.
+ void DisconnectAllInputs();
+ void DisconnectAllParams();
+
+ // updateInternalBus() updates m_internalBus appropriately for the number of
+ // channels. It is called in the constructor or in the audio thread with the
+ // context's graph lock.
+ void UpdateInternalBus();
+
+ // Announce to any nodes we're connected to that we changed our channel count
+ // for its input.
+ // It must be called in the audio thread with the context's graph lock.
+ void PropagateChannelCount();
+
+ // updateNumberOfChannels() is called in the audio thread at the start or end
+ // of the render quantum to pick up channel changes.
+ // It must be called with the context's graph lock.
+ void UpdateNumberOfChannels();
+
+ // m_numberOfChannels will only be changed in the audio thread.
+ // The main thread sets m_desiredNumberOfChannels which will later get picked
+ // up in the audio thread in updateNumberOfChannels().
+ unsigned number_of_channels_;
+ unsigned desired_number_of_channels_;
+
+ // m_internalBus and m_inPlaceBus must only be changed in the audio thread
+ // with the context's graph lock (or constructor).
+ scoped_refptr<AudioBus> internal_bus_;
+ scoped_refptr<AudioBus> in_place_bus_;
+ // If m_isInPlace is true, use m_inPlaceBus as the valid AudioBus; If false,
+ // use the default m_internalBus.
+ bool is_in_place_;
+
+ // This HashSet holds connection references. We must call
+ // AudioNode::makeConnection when we add an AudioNodeInput to this, and must
+ // call AudioNode::breakConnection() when we remove an AudioNodeInput from
+ // this.
+ HashSet<AudioNodeInput*> inputs_;
+ bool is_enabled_;
+
+ bool did_call_dispose_;
+
+ // For the purposes of rendering, keeps track of the number of inputs and
+ // AudioParams we're connected to. These value should only be changed at the
+ // very start or end of the rendering quantum.
+ unsigned rendering_fan_out_count_;
+ unsigned rendering_param_fan_out_count_;
+
+ // This collection of raw pointers is safe because they are retained by
+ // AudioParam objects retained by m_connectedParams of the owner AudioNode.
+ HashSet<AudioParamHandler*> params_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_NODE_OUTPUT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc
new file mode 100644
index 00000000000..fba9b5fb29f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.cc
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+const double AudioParamHandler::kDefaultSmoothingConstant = 0.05;
+const double AudioParamHandler::kSnapThreshold = 0.001;
+
+AudioParamHandler::AudioParamHandler(BaseAudioContext& context,
+ AudioParamType param_type,
+ String param_name,
+ double default_value,
+ float min_value,
+ float max_value)
+ : AudioSummingJunction(context.GetDeferredTaskHandler()),
+ param_type_(param_type),
+ param_name_(param_name),
+ intrinsic_value_(default_value),
+ default_value_(default_value),
+ min_value_(min_value),
+ max_value_(max_value) {
+ // The destination MUST exist because we need the destination handler for the
+ // AudioParam.
+ CHECK(context.destination());
+
+ destination_handler_ = &context.destination()->GetAudioDestinationHandler();
+ timeline_.SetSmoothedValue(default_value);
+}
+
+AudioDestinationHandler& AudioParamHandler::DestinationHandler() const {
+ return *destination_handler_;
+}
+
+void AudioParamHandler::SetParamType(AudioParamType param_type) {
+ param_type_ = param_type;
+}
+
+String AudioParamHandler::GetParamName() const {
+ return param_name_;
+}
+
+float AudioParamHandler::Value() {
+ // Update value for timeline.
+ float v = IntrinsicValue();
+ if (GetDeferredTaskHandler().IsAudioThread()) {
+ bool has_value;
+ float timeline_value = timeline_.ValueForContextTime(
+ DestinationHandler(), v, has_value, MinValue(), MaxValue());
+
+ if (has_value)
+ v = timeline_value;
+ }
+
+ SetIntrinsicValue(v);
+ return v;
+}
+
+void AudioParamHandler::SetIntrinsicValue(float new_value) {
+ new_value = clampTo(new_value, min_value_, max_value_);
+ NoBarrierStore(&intrinsic_value_, new_value);
+}
+
+void AudioParamHandler::SetValue(float value) {
+ SetIntrinsicValue(value);
+}
+
+float AudioParamHandler::SmoothedValue() {
+ return timeline_.SmoothedValue();
+}
+
+bool AudioParamHandler::Smooth() {
+ // If values have been explicitly scheduled on the timeline, then use the
+ // exact value. Smoothing effectively is performed by the timeline.
+ bool use_timeline_value = false;
+ float value =
+ timeline_.ValueForContextTime(DestinationHandler(), IntrinsicValue(),
+ use_timeline_value, MinValue(), MaxValue());
+
+ float smoothed_value = timeline_.SmoothedValue();
+ if (smoothed_value == value) {
+ // Smoothed value has already approached and snapped to value.
+ SetIntrinsicValue(value);
+ return true;
+ }
+
+ if (use_timeline_value) {
+ timeline_.SetSmoothedValue(value);
+ } else {
+ // Dezipper - exponential approach.
+ smoothed_value += (value - smoothed_value) * kDefaultSmoothingConstant;
+
+ // If we get close enough then snap to actual value.
+ // FIXME: the threshold needs to be adjustable depending on range - but
+ // this is OK general purpose value.
+ if (fabs(smoothed_value - value) < kSnapThreshold)
+ smoothed_value = value;
+ timeline_.SetSmoothedValue(smoothed_value);
+ }
+
+ SetIntrinsicValue(value);
+ return false;
+}
+
+float AudioParamHandler::FinalValue() {
+ float value = IntrinsicValue();
+ CalculateFinalValues(&value, 1, false);
+ return value;
+}
+
+void AudioParamHandler::CalculateSampleAccurateValues(
+ float* values,
+ unsigned number_of_values) {
+ bool is_safe =
+ GetDeferredTaskHandler().IsAudioThread() && values && number_of_values;
+ DCHECK(is_safe);
+ if (!is_safe)
+ return;
+
+ CalculateFinalValues(values, number_of_values, true);
+}
+
+void AudioParamHandler::CalculateFinalValues(float* values,
+ unsigned number_of_values,
+ bool sample_accurate) {
+ bool is_good =
+ GetDeferredTaskHandler().IsAudioThread() && values && number_of_values;
+ DCHECK(is_good);
+ if (!is_good)
+ return;
+
+ // The calculated result will be the "intrinsic" value summed with all
+ // audio-rate connections.
+
+ if (sample_accurate) {
+ // Calculate sample-accurate (a-rate) intrinsic values.
+ CalculateTimelineValues(values, number_of_values);
+ } else {
+ // Calculate control-rate (k-rate) intrinsic value.
+ bool has_value;
+ float value = IntrinsicValue();
+ float timeline_value = timeline_.ValueForContextTime(
+ DestinationHandler(), value, has_value, MinValue(), MaxValue());
+
+ if (has_value)
+ value = timeline_value;
+
+ values[0] = value;
+ SetIntrinsicValue(value);
+ }
+
+ // Now sum all of the audio-rate connections together (unity-gain summing
+ // junction). Note that connections would normally be mono, but we mix down
+ // to mono if necessary.
+ scoped_refptr<AudioBus> summing_bus =
+ AudioBus::Create(1, number_of_values, false);
+ summing_bus->SetChannelMemory(0, values, number_of_values);
+
+ for (unsigned i = 0; i < NumberOfRenderingConnections(); ++i) {
+ AudioNodeOutput* output = RenderingOutput(i);
+ DCHECK(output);
+
+ // Render audio from this output.
+ AudioBus* connection_bus =
+ output->Pull(nullptr, AudioUtilities::kRenderQuantumFrames);
+
+ // Sum, with unity-gain.
+ summing_bus->SumFrom(*connection_bus);
+ }
+}
+
+void AudioParamHandler::CalculateTimelineValues(float* values,
+ unsigned number_of_values) {
+ // Calculate values for this render quantum. Normally
+ // |numberOfValues| will equal to
+ // AudioUtilities::kRenderQuantumFrames (the render quantum size).
+ double sample_rate = DestinationHandler().SampleRate();
+ size_t start_frame = DestinationHandler().CurrentSampleFrame();
+ size_t end_frame = start_frame + number_of_values;
+
+ // Note we're running control rate at the sample-rate.
+ // Pass in the current value as default value.
+ SetIntrinsicValue(timeline_.ValuesForFrameRange(
+ start_frame, end_frame, IntrinsicValue(), values, number_of_values,
+ sample_rate, sample_rate, MinValue(), MaxValue()));
+}
+
+void AudioParamHandler::Connect(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (outputs_.Contains(&output))
+ return;
+
+ output.AddParam(*this);
+ outputs_.insert(&output);
+ ChangedOutputs();
+}
+
+void AudioParamHandler::Disconnect(AudioNodeOutput& output) {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+
+ if (outputs_.Contains(&output)) {
+ outputs_.erase(&output);
+ ChangedOutputs();
+ output.RemoveParam(*this);
+ }
+}
+
+int AudioParamHandler::ComputeQHistogramValue(float new_value) const {
+ // For the Q value, assume a useful range is [0, 25] and that 0.25 dB
+ // resolution is good enough. Then, we can map the floating point Q value (in
+ // dB) to an integer just by multipling by 4 and rounding.
+ new_value = clampTo(new_value, 0.0, 25.0);
+ return static_cast<int>(4 * new_value + 0.5);
+}
+
+// ----------------------------------------------------------------
+
+AudioParam::AudioParam(BaseAudioContext& context,
+ AudioParamType param_type,
+ String param_name,
+ double default_value,
+ float min_value,
+ float max_value)
+ : handler_(AudioParamHandler::Create(context,
+ param_type,
+ param_name,
+ default_value,
+ min_value,
+ max_value)),
+ context_(context) {}
+
+AudioParam* AudioParam::Create(BaseAudioContext& context,
+ AudioParamType param_type,
+ String param_name,
+ double default_value,
+ float min_value,
+ float max_value) {
+ DCHECK_LE(min_value, max_value);
+ return new AudioParam(context, param_type, param_name, default_value,
+ min_value, max_value);
+}
+
+void AudioParam::Trace(blink::Visitor* visitor) {
+ visitor->Trace(context_);
+ ScriptWrappable::Trace(visitor);
+}
+
+float AudioParam::value() const {
+ return Handler().Value();
+}
+
+void AudioParam::WarnIfOutsideRange(const String& param_method, float value) {
+ if (value < minValue() || value > maxValue()) {
+ Context()->GetExecutionContext()->AddConsoleMessage(ConsoleMessage::Create(
+ kJSMessageSource, kWarningMessageLevel,
+ Handler().GetParamName() + "." + param_method + " " +
+ String::Number(value) + " outside nominal range [" +
+ String::Number(minValue()) + ", " + String::Number(maxValue()) +
+ "]; value will be clamped."));
+ }
+}
+
+void AudioParam::setValue(float value) {
+ WarnIfOutsideRange("value", value);
+ Handler().SetValue(value);
+}
+
+void AudioParam::setValue(float value, ExceptionState& exception_state) {
+ WarnIfOutsideRange("value", value);
+
+ // This is to signal any errors, if necessary, about conflicting
+ // automations.
+ setValueAtTime(value, Context()->currentTime(), exception_state);
+ // This is to change the value so that an immediate query for the
+ // value returns the expected values.
+ Handler().SetValue(value);
+}
+
+float AudioParam::defaultValue() const {
+ return Handler().DefaultValue();
+}
+
+float AudioParam::minValue() const {
+ return Handler().MinValue();
+}
+
+float AudioParam::maxValue() const {
+ return Handler().MaxValue();
+}
+
+void AudioParam::SetParamType(AudioParamType param_type) {
+ Handler().SetParamType(param_type);
+}
+
+AudioParam* AudioParam::setValueAtTime(float value,
+ double time,
+ ExceptionState& exception_state) {
+ WarnIfOutsideRange("setValueAtTime value", value);
+ Handler().Timeline().SetValueAtTime(value, time, exception_state);
+ return this;
+}
+
+AudioParam* AudioParam::linearRampToValueAtTime(
+ float value,
+ double time,
+ ExceptionState& exception_state) {
+ WarnIfOutsideRange("linearRampToValueAtTime value", value);
+ Handler().Timeline().LinearRampToValueAtTime(
+ value, time, Handler().IntrinsicValue(), Context()->currentTime(),
+ exception_state);
+
+ return this;
+}
+
+AudioParam* AudioParam::exponentialRampToValueAtTime(
+ float value,
+ double time,
+ ExceptionState& exception_state) {
+ WarnIfOutsideRange("exponentialRampToValue value", value);
+ Handler().Timeline().ExponentialRampToValueAtTime(
+ value, time, Handler().IntrinsicValue(), Context()->currentTime(),
+ exception_state);
+
+ return this;
+}
+
+AudioParam* AudioParam::setTargetAtTime(float target,
+ double time,
+ double time_constant,
+ ExceptionState& exception_state) {
+ WarnIfOutsideRange("setTargetAtTime value", target);
+ Handler().Timeline().SetTargetAtTime(target, time, time_constant,
+ exception_state);
+
+ // Don't update the histogram here. It's not clear in normal usage if the
+ // parameter value will actually reach |target|.
+ return this;
+}
+
+AudioParam* AudioParam::setValueCurveAtTime(const Vector<float>& curve,
+ double time,
+ double duration,
+ ExceptionState& exception_state) {
+ float min = minValue();
+ float max = maxValue();
+
+ // Find the first value in the curve (if any) that is outside the
+ // nominal range. It's probably not necessary to produce a warning
+ // on every value outside the nominal range.
+ for (unsigned k = 0; k < curve.size(); ++k) {
+ float value = curve[k];
+
+ if (value < min || value > max) {
+ WarnIfOutsideRange("setValueCurveAtTime value", value);
+ break;
+ }
+ }
+
+ Handler().Timeline().SetValueCurveAtTime(curve, time, duration,
+ exception_state);
+
+ // We could update the histogram with every value in the curve, due to
+ // interpolation, we'll probably be missing many values. So we don't update
+ // the histogram. setValueCurveAtTime is probably a fairly rare method
+ // anyway.
+ return this;
+}
+
+AudioParam* AudioParam::cancelScheduledValues(double start_time,
+ ExceptionState& exception_state) {
+ Handler().Timeline().CancelScheduledValues(start_time, exception_state);
+ return this;
+}
+
+AudioParam* AudioParam::cancelAndHoldAtTime(double start_time,
+ ExceptionState& exception_state) {
+ Handler().Timeline().CancelAndHoldAtTime(start_time, exception_state);
+ return this;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.h
new file mode 100644
index 00000000000..00f4e96b31b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_H_
+
+#include <sys/types.h>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_timeline.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_summing_junction.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+#include "third_party/blink/renderer/platform/wtf/thread_safe_ref_counted.h"
+
+namespace blink {
+
+class AudioNodeOutput;
+
+// Each AudioParam gets an identifier here. This is mostly for introspection
+// if warnings or other messages need to be printed. It's useful to know what
+// the AudioParam represents. The name should include the node type and the
+// name of the AudioParam.
+enum AudioParamType {
+ kParamTypeAudioBufferSourcePlaybackRate,
+ kParamTypeAudioBufferSourceDetune,
+ kParamTypeBiquadFilterFrequency,
+ kParamTypeBiquadFilterQ,
+ kParamTypeBiquadFilterGain,
+ kParamTypeBiquadFilterDetune,
+ kParamTypeDelayDelayTime,
+ kParamTypeDynamicsCompressorThreshold,
+ kParamTypeDynamicsCompressorKnee,
+ kParamTypeDynamicsCompressorRatio,
+ kParamTypeDynamicsCompressorAttack,
+ kParamTypeDynamicsCompressorRelease,
+ kParamTypeGainGain,
+ kParamTypeOscillatorFrequency,
+ kParamTypeOscillatorDetune,
+ kParamTypeStereoPannerPan,
+ kParamTypePannerPositionX,
+ kParamTypePannerPositionY,
+ kParamTypePannerPositionZ,
+ kParamTypePannerOrientationX,
+ kParamTypePannerOrientationY,
+ kParamTypePannerOrientationZ,
+ kParamTypeAudioListenerPositionX,
+ kParamTypeAudioListenerPositionY,
+ kParamTypeAudioListenerPositionZ,
+ kParamTypeAudioListenerForwardX,
+ kParamTypeAudioListenerForwardY,
+ kParamTypeAudioListenerForwardZ,
+ kParamTypeAudioListenerUpX,
+ kParamTypeAudioListenerUpY,
+ kParamTypeAudioListenerUpZ,
+ kParamTypeConstantSourceOffset,
+ kParamTypeAudioWorklet,
+};
+
+// AudioParamHandler is an actual implementation of web-exposed AudioParam
+// interface. Each of AudioParam object creates and owns an AudioParamHandler,
+// and it is responsible for all of AudioParam tasks. An AudioParamHandler
+// object is owned by the originator AudioParam object, and some audio
+// processing classes have additional references. An AudioParamHandler can
+// outlive the owner AudioParam, and it never dies before the owner AudioParam
+// dies.
+class AudioParamHandler final : public ThreadSafeRefCounted<AudioParamHandler>,
+ public AudioSummingJunction {
+ public:
+ AudioParamType GetParamType() const { return param_type_; }
+ void SetParamType(AudioParamType);
+ // Return a nice name for the AudioParam.
+ String GetParamName() const;
+
+ static const double kDefaultSmoothingConstant;
+ static const double kSnapThreshold;
+
+ static scoped_refptr<AudioParamHandler> Create(BaseAudioContext& context,
+ AudioParamType param_type,
+ String param_name,
+ double default_value,
+ float min_value,
+ float max_value) {
+ return base::AdoptRef(new AudioParamHandler(
+ context, param_type, param_name, default_value, min_value, max_value));
+ }
+
+ // This should be used only in audio rendering thread.
+ AudioDestinationHandler& DestinationHandler() const;
+
+ // AudioSummingJunction
+ void DidUpdate() override {}
+
+ AudioParamTimeline& Timeline() { return timeline_; }
+
+ // Intrinsic value.
+ float Value();
+ void SetValue(float);
+
+ // Final value for k-rate parameters, otherwise use
+ // calculateSampleAccurateValues() for a-rate.
+ // Must be called in the audio thread.
+ float FinalValue();
+
+ float DefaultValue() const { return static_cast<float>(default_value_); }
+ float MinValue() const { return min_value_; }
+ float MaxValue() const { return max_value_; }
+
+ // Value smoothing:
+
+ // When a new value is set with setValue(), in our internal use of the
+ // parameter we don't immediately jump to it. Instead we smoothly approach
+ // this value to avoid glitching.
+ float SmoothedValue();
+
+ // Smoothly exponentially approaches to (de-zippers) the desired value.
+ // Returns true if smoothed value has already snapped exactly to value.
+ bool Smooth();
+
+ void ResetSmoothedValue() { timeline_.SetSmoothedValue(IntrinsicValue()); }
+
+ bool HasSampleAccurateValues() {
+ bool has_values =
+ timeline_.HasValues(destination_handler_->CurrentSampleFrame(),
+ destination_handler_->SampleRate());
+
+ return has_values || NumberOfRenderingConnections();
+ }
+
+ // Calculates numberOfValues parameter values starting at the context's
+ // current time.
+ // Must be called in the context's render thread.
+ void CalculateSampleAccurateValues(float* values, unsigned number_of_values);
+
+ // Connect an audio-rate signal to control this parameter.
+ void Connect(AudioNodeOutput&);
+ void Disconnect(AudioNodeOutput&);
+
+ float IntrinsicValue() const { return NoBarrierLoad(&intrinsic_value_); }
+
+ private:
+ AudioParamHandler(BaseAudioContext&,
+ AudioParamType,
+ String param_name,
+ double default_value,
+ float min,
+ float max);
+
+ // sampleAccurate corresponds to a-rate (audio rate) vs. k-rate in the Web
+ // Audio specification.
+ void CalculateFinalValues(float* values,
+ unsigned number_of_values,
+ bool sample_accurate);
+ void CalculateTimelineValues(float* values, unsigned number_of_values);
+
+ int ComputeQHistogramValue(float) const;
+
+ // The type of AudioParam, indicating what this AudioParam represents and what
+ // node it belongs to. Mostly for informational purposes and doesn't affect
+ // implementation.
+ AudioParamType param_type_;
+ // Name of the AudioParam. This is only used for printing out more
+ // informative warnings, and is otherwise arbitrary.
+ String param_name_;
+
+ // Intrinsic value
+ float intrinsic_value_;
+ void SetIntrinsicValue(float new_value);
+
+ float default_value_;
+
+ // Nominal range for the value
+ float min_value_;
+ float max_value_;
+
+ AudioParamTimeline timeline_;
+
+  // The destination node used to get necessary information like the sample rate
+ // and context time.
+ scoped_refptr<AudioDestinationHandler> destination_handler_;
+};
+
+// AudioParam class represents web-exposed AudioParam interface.
+class AudioParam final : public ScriptWrappable {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioParam* Create(
+ BaseAudioContext&,
+ AudioParamType,
+ String param_name,
+ double default_value,
+ float min_value = -std::numeric_limits<float>::max(),
+ float max_value = std::numeric_limits<float>::max());
+
+ void Trace(blink::Visitor*);
+ // |handler| always returns a valid object.
+ AudioParamHandler& Handler() const { return *handler_; }
+ // |context| always returns a valid object.
+ BaseAudioContext* Context() const { return context_; }
+
+ AudioParamType GetParamType() const { return Handler().GetParamType(); }
+ void SetParamType(AudioParamType);
+ String GetParamName() const;
+
+ float value() const;
+ void setValue(float, ExceptionState&);
+ void setValue(float);
+ float defaultValue() const;
+
+ float minValue() const;
+ float maxValue() const;
+
+ AudioParam* setValueAtTime(float value, double time, ExceptionState&);
+ AudioParam* linearRampToValueAtTime(float value,
+ double time,
+ ExceptionState&);
+ AudioParam* exponentialRampToValueAtTime(float value,
+ double time,
+ ExceptionState&);
+ AudioParam* setTargetAtTime(float target,
+ double time,
+ double time_constant,
+ ExceptionState&);
+ AudioParam* setValueCurveAtTime(const Vector<float>& curve,
+ double time,
+ double duration,
+ ExceptionState&);
+ AudioParam* cancelScheduledValues(double start_time, ExceptionState&);
+ AudioParam* cancelAndHoldAtTime(double start_time, ExceptionState&);
+
+ private:
+ AudioParam(BaseAudioContext&,
+ AudioParamType,
+ String param_name,
+ double default_value,
+ float min,
+ float max);
+
+ void WarnIfOutsideRange(const String& param_methd, float value);
+
+ scoped_refptr<AudioParamHandler> handler_;
+ Member<BaseAudioContext> context_;
+
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.idl
new file mode 100644
index 00000000000..c9c6283e3d5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param.idl
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#AudioParam
+interface AudioParam {
+ [RaisesException=Setter] attribute float value;
+ readonly attribute float defaultValue;
+
+ // Nominal range for the value.
+ readonly attribute float minValue;
+ readonly attribute float maxValue;
+
+ // Parameter automation.
+ [RaisesException, MeasureAs=AudioParamSetValueAtTime] AudioParam setValueAtTime(float value, double time);
+ [RaisesException, MeasureAs=AudioParamLinearRampToValueAtTime] AudioParam linearRampToValueAtTime(float value, double time);
+ [RaisesException, MeasureAs=AudioParamExponentialRampToValueAtTime] AudioParam exponentialRampToValueAtTime(float value, double time);
+
+ // Exponentially approach the target with a rate having the given time constant.
+ [RaisesException, MeasureAs=AudioParamSetTargetAtTime] AudioParam setTargetAtTime(float target, double time, double timeConstant);
+
+ // Sets an array of arbitrary parameter values starting at time for the given duration.
+ // The number of values will be scaled to fit into the desired duration.
+ [RaisesException, MeasureAs=AudioParamSetValueCurveAtTime] AudioParam setValueCurveAtTime(sequence<float> values, double time, double duration);
+
+ // Cancels all scheduled parameter changes with times greater than or equal to startTime.
+ [RaisesException, MeasureAs=AudioParamCancelScheduledValues] AudioParam cancelScheduledValues(double startTime);
+
+ // Cancel scheduled parameter changes and hold the last value
+ [RaisesException, MeasureAs=AudioParamCancelAndHoldAtTime] AudioParam cancelAndHoldAtTime(double startTime);
+
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_descriptor.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_descriptor.idl
new file mode 100644
index 00000000000..0aed454e02b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_descriptor.idl
@@ -0,0 +1,14 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See: https://webaudio.github.io/web-audio-api/#dictdef-audioparamdescriptor
+dictionary AudioParamDescriptor {
+ required DOMString name;
+ float defaultValue = 0;
+
+ // TODO(hongchan): These numbers are minimum/maximum number possible for
+ // |float| type. Remove this comment when the spec is fixed.
+ float minValue = -3.4028235e38;
+ float maxValue = 3.4028235e38;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.cc
new file mode 100644
index 00000000000..abc237fc809
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.cc
@@ -0,0 +1,65 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_param_map.h"
+
+namespace blink {
+
+class AudioParamMapIterationSource final
+ : public PairIterable<String, AudioParam*>::IterationSource {
+ public:
+ AudioParamMapIterationSource(
+ const HeapHashMap<String, Member<AudioParam>>& map) {
+ for (const auto name : map.Keys()) {
+ parameter_names_.push_back(name);
+ parameter_objects_.push_back(map.at(name));
+ }
+ }
+
+ bool Next(ScriptState* scrip_state,
+ String& key,
+ AudioParam*& audio_param,
+ ExceptionState&) override {
+ if (current_index_ == parameter_names_.size())
+ return false;
+ key = parameter_names_[current_index_];
+ audio_param = parameter_objects_[current_index_];
+ ++current_index_;
+ return true;
+ }
+
+ virtual void Trace(blink::Visitor* visitor) {
+ visitor->Trace(parameter_objects_);
+ PairIterable<String, AudioParam*>::IterationSource::Trace(visitor);
+ }
+
+ private:
+ // For sequential iteration (e.g. Next()).
+ Vector<String> parameter_names_;
+ HeapVector<Member<AudioParam>> parameter_objects_;
+ unsigned current_index_;
+};
+
+AudioParamMap::AudioParamMap(
+ const HeapHashMap<String, Member<AudioParam>>& parameter_map)
+ : parameter_map_(parameter_map) {}
+
+PairIterable<String, AudioParam*>::IterationSource*
+ AudioParamMap::StartIteration(ScriptState*, ExceptionState&) {
+ return new AudioParamMapIterationSource(parameter_map_);
+}
+
+bool AudioParamMap::GetMapEntry(ScriptState*,
+ const String& key,
+ AudioParam*& audio_param,
+ ExceptionState&) {
+ if (parameter_map_.Contains(key)) {
+ audio_param = parameter_map_.at(key);
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h
new file mode 100644
index 00000000000..8815e8e1f09
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_MAP_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_MAP_H_
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/bindings/core/v8/maplike.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+
+namespace blink {
+
+class AudioParam;
+
+class AudioParamMap final : public ScriptWrappable,
+ public Maplike<String, AudioParam*> {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ explicit AudioParamMap(
+ const HeapHashMap<String, Member<AudioParam>>& parameter_map);
+
+ // IDL attributes / methods
+ size_t size() const { return parameter_map_.size(); }
+
+ AudioParam* At(String name) { return parameter_map_.at(name); }
+ bool Contains(String name) { return parameter_map_.Contains(name); }
+
+ void Trace(blink::Visitor* visitor) override {
+ visitor->Trace(parameter_map_);
+ ScriptWrappable::Trace(visitor);
+ }
+
+ private:
+ PairIterable<String, AudioParam*>::IterationSource* StartIteration(
+ ScriptState*,
+ ExceptionState&) override;
+ bool GetMapEntry(ScriptState*,
+ const String& key,
+ AudioParam*&,
+ ExceptionState&) override;
+
+ const HeapHashMap<String, Member<AudioParam>> parameter_map_;
+};
+
+} // namespace blink
+
+#endif
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.idl
new file mode 100644
index 00000000000..4055562723e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_map.idl
@@ -0,0 +1,9 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audioparammap
+
+interface AudioParamMap {
+ readonly maplike<DOMString, AudioParam>;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc
new file mode 100644
index 00000000000..cd3831e98f2
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.cc
@@ -0,0 +1,1991 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_param_timeline.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "build/build_config.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/frame/deprecation.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/cpu.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+namespace blink {
+
+// For a SetTarget event, if the relative difference between the current value
+// and the target value is less than this, consider them the same and just
+// output the target value. This value MUST be larger than the single precision
+// epsilon of 5.960465e-8. Due to round-off, this value is not achievable in
+// general. This value can vary across the platforms (CPU) and thus it is
+// determined experimentally.
+const float kSetTargetThreshold = 1.5e-6;
+
+// For a SetTarget event, if the target value is 0, and the current value is
+// less than this threshold, consider the curve to have converged to 0. We need
+// a separate case from kSetTargetThreshold because that uses relative error,
+// which is never met if the target value is 0, a common case. This value MUST
+// be larger than the least positive normalized single precision
+// value (1.1754944e-38) because we normally operate with flush-to-zero enabled.
+const float kSetTargetZeroThreshold = 1e-20;
+
+static bool IsNonNegativeAudioParamTime(double time,
+ ExceptionState& exception_state,
+ String message = "Time") {
+ if (time >= 0)
+ return true;
+
+ exception_state.ThrowRangeError(
+ message +
+ " must be a finite non-negative number: " + String::Number(time));
+ return false;
+}
+
+static bool IsPositiveAudioParamTime(double time,
+ ExceptionState& exception_state,
+ String message) {
+ if (time > 0)
+ return true;
+
+ exception_state.ThrowRangeError(
+ message + " must be a finite positive number: " + String::Number(time));
+ return false;
+}
+
+String AudioParamTimeline::EventToString(const ParamEvent& event) const {
+  // The default arguments for most automation methods are the value and the
+ // time.
+ String args =
+ String::Number(event.Value()) + ", " + String::Number(event.Time(), 16);
+
+ // Get a nice printable name for the event and update the args if necessary.
+ String s;
+ switch (event.GetType()) {
+ case ParamEvent::kSetValue:
+ s = "setValueAtTime";
+ break;
+ case ParamEvent::kLinearRampToValue:
+ s = "linearRampToValueAtTime";
+ break;
+ case ParamEvent::kExponentialRampToValue:
+ s = "exponentialRampToValue";
+ break;
+ case ParamEvent::kSetTarget:
+ s = "setTargetAtTime";
+ // This has an extra time constant arg
+ args = args + ", " + String::Number(event.TimeConstant(), 16);
+ break;
+ case ParamEvent::kSetValueCurve:
+ s = "setValueCurveAtTime";
+ // Replace the default arg, using "..." to denote the curve argument.
+ args = "..., " + String::Number(event.Time(), 16) + ", " +
+ String::Number(event.Duration(), 16);
+ break;
+ case ParamEvent::kCancelValues:
+ case ParamEvent::kSetValueCurveEnd:
+ // Fall through; we should never have to print out the internal
+ // |kCancelValues| or |kSetValueCurveEnd| event.
+ case ParamEvent::kLastType:
+ NOTREACHED();
+ break;
+ };
+
+ return s + "(" + args + ")";
+}
+
+// Computes the value of a linear ramp event at time t with the given event
+// parameters.
+float AudioParamTimeline::LinearRampAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ double time2) {
+ return value1 + (value2 - value1) * (t - time1) / (time2 - time1);
+}
+
+// Computes the value of an exponential ramp event at time t with the given
+// event parameters.
+float AudioParamTimeline::ExponentialRampAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ double time2) {
+ return value1 * pow(value2 / value1, (t - time1) / (time2 - time1));
+}
+
+// Compute the value of a set target event at time t with the given event
+// parameters.
+float AudioParamTimeline::TargetValueAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ float time_constant) {
+ return value2 + (value1 - value2) * exp(-(t - time1) / time_constant);
+}
+
+// Compute the value of a set curve event at time t with the given event
+// parameters.
+float AudioParamTimeline::ValueCurveAtTime(double t,
+ double time1,
+ double duration,
+ const float* curve_data,
+ unsigned curve_length) {
+ double curve_index = (curve_length - 1) / duration * (t - time1);
+ unsigned k = std::min(static_cast<unsigned>(curve_index), curve_length - 1);
+ unsigned k1 = std::min(k + 1, curve_length - 1);
+ float c0 = curve_data[k];
+ float c1 = curve_data[k1];
+ float delta = std::min(curve_index - k, 1.0);
+
+ return c0 + (c1 - c0) * delta;
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateSetValueEvent(float value, double time) {
+ return base::WrapUnique(new ParamEvent(ParamEvent::kSetValue, value, time));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateLinearRampEvent(float value,
+ double time,
+ float initial_value,
+ double call_time) {
+ return base::WrapUnique(new ParamEvent(ParamEvent::kLinearRampToValue, value,
+ time, initial_value, call_time));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateExponentialRampEvent(float value,
+ double time,
+ float initial_value,
+ double call_time) {
+ return base::WrapUnique(new ParamEvent(ParamEvent::kExponentialRampToValue,
+ value, time, initial_value,
+ call_time));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateSetTargetEvent(float value,
+ double time,
+ double time_constant) {
+  // The time line code does not expect a timeConstant of 0. (It
+  // returns NaN or Infinity due to division by zero.) The caller
+ // should have converted this to a SetValueEvent.
+ DCHECK_NE(time_constant, 0);
+ return base::WrapUnique(
+ new ParamEvent(ParamEvent::kSetTarget, value, time, time_constant));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateSetValueCurveEvent(
+ const Vector<float>& curve,
+ double time,
+ double duration) {
+ double curve_points = (curve.size() - 1) / duration;
+ float end_value = curve.data()[curve.size() - 1];
+
+ return base::WrapUnique(new ParamEvent(ParamEvent::kSetValueCurve, time,
+ duration, curve, curve_points,
+ end_value));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateSetValueCurveEndEvent(float value,
+ double time) {
+ return base::WrapUnique(
+ new ParamEvent(ParamEvent::kSetValueCurveEnd, value, time));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateCancelValuesEvent(
+ double time,
+ std::unique_ptr<ParamEvent> saved_event) {
+ if (saved_event) {
+ // The savedEvent can only have certain event types. Verify that.
+ ParamEvent::Type saved_type = saved_event->GetType();
+
+ DCHECK_NE(saved_type, ParamEvent::kLastType);
+ DCHECK(saved_type == ParamEvent::kLinearRampToValue ||
+ saved_type == ParamEvent::kExponentialRampToValue ||
+ saved_type == ParamEvent::kSetValueCurve);
+ }
+
+ return base::WrapUnique(
+ new ParamEvent(ParamEvent::kCancelValues, time, std::move(saved_event)));
+}
+
+std::unique_ptr<AudioParamTimeline::ParamEvent>
+AudioParamTimeline::ParamEvent::CreateGeneralEvent(
+ Type type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time,
+ double time_constant,
+ double duration,
+ Vector<float>& curve,
+ double curve_points_per_second,
+ float curve_end_value,
+ std::unique_ptr<ParamEvent> saved_event) {
+ return base::WrapUnique(new ParamEvent(
+ type, value, time, initial_value, call_time, time_constant, duration,
+ curve, curve_points_per_second, curve_end_value, std::move(saved_event)));
+}
+
+AudioParamTimeline::ParamEvent* AudioParamTimeline::ParamEvent::SavedEvent()
+ const {
+ DCHECK_EQ(GetType(), ParamEvent::kCancelValues);
+ return saved_event_.get();
+}
+
+bool AudioParamTimeline::ParamEvent::HasDefaultCancelledValue() const {
+ DCHECK_EQ(GetType(), ParamEvent::kCancelValues);
+ return has_default_cancelled_value_;
+}
+
+void AudioParamTimeline::ParamEvent::SetCancelledValue(float value) {
+ DCHECK_EQ(GetType(), ParamEvent::kCancelValues);
+ value_ = value;
+ has_default_cancelled_value_ = true;
+}
+
+// General event
+AudioParamTimeline::ParamEvent::ParamEvent(
+ ParamEvent::Type type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time,
+ double time_constant,
+ double duration,
+ Vector<float>& curve,
+ double curve_points_per_second,
+ float curve_end_value,
+ std::unique_ptr<ParamEvent> saved_event)
+ : type_(type),
+ value_(value),
+ time_(time),
+ initial_value_(initial_value),
+ call_time_(call_time),
+ time_constant_(time_constant),
+ duration_(duration),
+ curve_points_per_second_(curve_points_per_second),
+ curve_end_value_(curve_end_value),
+ saved_event_(std::move(saved_event)),
+ has_default_cancelled_value_(false) {
+ curve_ = curve;
+}
+
+// Create simplest event needing just a value and time, like setValueAtTime
+AudioParamTimeline::ParamEvent::ParamEvent(ParamEvent::Type type,
+ float value,
+ double time)
+ : type_(type),
+ value_(value),
+ time_(time),
+ initial_value_(0),
+ call_time_(0),
+ time_constant_(0),
+ duration_(0),
+ curve_points_per_second_(0),
+ curve_end_value_(0),
+ saved_event_(nullptr),
+ has_default_cancelled_value_(false) {
+ DCHECK(type == ParamEvent::kSetValue ||
+ type == ParamEvent::kSetValueCurveEnd);
+}
+
+// Create a linear or exponential ramp that requires an initial value and
+// time in case
+// there is no actual event that precedes this event.
+AudioParamTimeline::ParamEvent::ParamEvent(ParamEvent::Type type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time)
+ : type_(type),
+ value_(value),
+ time_(time),
+ initial_value_(initial_value),
+ call_time_(call_time),
+ time_constant_(0),
+ duration_(0),
+ curve_points_per_second_(0),
+ curve_end_value_(0),
+ saved_event_(nullptr),
+ has_default_cancelled_value_(false) {
+ DCHECK(type == ParamEvent::kLinearRampToValue ||
+ type == ParamEvent::kExponentialRampToValue);
+}
+
+// Create an event needing a time constant (setTargetAtTime). The target
+// value is stored in |value_|; fields for ramps and curves are zeroed.
+AudioParamTimeline::ParamEvent::ParamEvent(ParamEvent::Type type,
+                                           float value,
+                                           double time,
+                                           double time_constant)
+    : type_(type),
+      value_(value),
+      time_(time),
+      initial_value_(0),
+      call_time_(0),
+      time_constant_(time_constant),
+      duration_(0),
+      curve_points_per_second_(0),
+      curve_end_value_(0),
+      saved_event_(nullptr),
+      has_default_cancelled_value_(false) {
+  DCHECK_EQ(type, ParamEvent::kSetTarget);
+}
+
+// Create a setValueCurve event. |value_| is unused (0) for curve events;
+// the curve samples themselves are deep-copied into |curve_|.
+AudioParamTimeline::ParamEvent::ParamEvent(ParamEvent::Type type,
+                                           double time,
+                                           double duration,
+                                           const Vector<float>& curve,
+                                           double curve_points_per_second,
+                                           float curve_end_value)
+    : type_(type),
+      value_(0),
+      time_(time),
+      initial_value_(0),
+      call_time_(0),
+      time_constant_(0),
+      duration_(duration),
+      curve_points_per_second_(curve_points_per_second),
+      curve_end_value_(curve_end_value),
+      saved_event_(nullptr),
+      has_default_cancelled_value_(false) {
+  DCHECK_EQ(type, ParamEvent::kSetValueCurve);
+  // Deep-copy the caller's curve so the event owns its own samples.
+  unsigned curve_length = curve.size();
+  curve_.resize(curve_length);
+  memcpy(curve_.data(), curve.data(), curve_length * sizeof(float));
+}
+
+// Create CancelValues event. |saved_event| (may be null) preserves the
+// parameters of the event being cancelled so the held value can be
+// computed later (see CancelAndHoldAtTime/HandleCancelValues).
+AudioParamTimeline::ParamEvent::ParamEvent(
+    ParamEvent::Type type,
+    double time,
+    std::unique_ptr<ParamEvent> saved_event)
+    : type_(type),
+      value_(0),
+      time_(time),
+      initial_value_(0),
+      call_time_(0),
+      time_constant_(0),
+      duration_(0),
+      curve_points_per_second_(0),
+      curve_end_value_(0),
+      saved_event_(std::move(saved_event)),
+      has_default_cancelled_value_(false) {
+  DCHECK_EQ(type, ParamEvent::kCancelValues);
+}
+
+// Schedule an instantaneous value change at |time| (setValueAtTime).
+// Main-thread only; throws via |exception_state| if |time| is negative
+// or non-finite. The lock guards |events_| against the audio thread.
+void AudioParamTimeline::SetValueAtTime(float value,
+                                        double time,
+                                        ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(time, exception_state))
+    return;
+
+  MutexLocker locker(events_lock_);
+  InsertEvent(ParamEvent::CreateSetValueEvent(value, time), exception_state);
+}
+
+// Schedule a linear ramp ending at |value| at |time|.
+// |initial_value| and |call_time| let InsertEvent synthesize a starting
+// point if no event precedes the ramp. Main-thread only.
+void AudioParamTimeline::LinearRampToValueAtTime(
+    float value,
+    double time,
+    float initial_value,
+    double call_time,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(time, exception_state))
+    return;
+
+  MutexLocker locker(events_lock_);
+  InsertEvent(
+      ParamEvent::CreateLinearRampEvent(value, time, initial_value, call_time),
+      exception_state);
+}
+
+// Schedule an exponential ramp ending at |value| at |time|. A target of
+// exactly zero is rejected (an exponential ramp can never reach 0); the
+// error message phrases this as the open interval between the smallest
+// denormals, which contains only 0. Main-thread only.
+void AudioParamTimeline::ExponentialRampToValueAtTime(
+    float value,
+    double time,
+    float initial_value,
+    double call_time,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(time, exception_state))
+    return;
+
+  if (!value) {
+    exception_state.ThrowRangeError(
+        "The float target value provided (" + String::Number(value) +
+        ") should not be in the range (" +
+        String::Number(-std::numeric_limits<float>::denorm_min()) + ", " +
+        String::Number(std::numeric_limits<float>::denorm_min()) + ").");
+    return;
+  }
+
+  MutexLocker locker(events_lock_);
+  InsertEvent(ParamEvent::CreateExponentialRampEvent(value, time, initial_value,
+                                                     call_time),
+              exception_state);
+}
+
+// Schedule an exponential approach to |target| starting at |time| with
+// the given |time_constant| (setTargetAtTime). Both times must be
+// non-negative and finite. Main-thread only.
+void AudioParamTimeline::SetTargetAtTime(float target,
+                                         double time,
+                                         double time_constant,
+                                         ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(time, exception_state) ||
+      !IsNonNegativeAudioParamTime(time_constant, exception_state,
+                                   "Time constant"))
+    return;
+
+  MutexLocker locker(events_lock_);
+
+  // If timeConstant = 0, we instantly jump to the target value, so
+  // insert a SetValueEvent instead of SetTargetEvent.
+  if (time_constant == 0) {
+    InsertEvent(ParamEvent::CreateSetValueEvent(target, time), exception_state);
+  } else {
+    InsertEvent(ParamEvent::CreateSetTargetEvent(target, time, time_constant),
+                exception_state);
+  }
+}
+
+// Schedule a value curve spanning [time, time + duration]
+// (setValueCurveAtTime). Requires non-negative |time|, strictly positive
+// |duration|, and at least 2 curve points. Main-thread only.
+void AudioParamTimeline::SetValueCurveAtTime(const Vector<float>& curve,
+                                             double time,
+                                             double duration,
+                                             ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(time, exception_state) ||
+      !IsPositiveAudioParamTime(duration, exception_state, "Duration"))
+    return;
+
+  if (curve.size() < 2) {
+    exception_state.ThrowDOMException(
+        kInvalidStateError,
+        ExceptionMessages::IndexExceedsMinimumBound(
+            "curve length", curve.size(), static_cast<size_t>(2)));
+    return;
+  }
+
+  MutexLocker locker(events_lock_);
+  InsertEvent(ParamEvent::CreateSetValueCurveEvent(curve, time, duration),
+              exception_state);
+
+  // Insert a setValueAtTime event too to establish an event so that all
+  // following events will process from the end of the curve instead of the
+  // beginning. The event's value is the last curve sample.
+  InsertEvent(ParamEvent::CreateSetValueCurveEndEvent(
+                  curve.data()[curve.size() - 1], time + duration),
+              exception_state);
+}
+
+// Insert |event| into |events_| keeping the list sorted by time.
+// Rejects (via |exception_state|) events that would overlap a
+// SetValueCurve, and overwrites an existing event with the same type and
+// time. Caller must hold |events_lock_|; main-thread only. Newly inserted
+// events are also tracked in |new_events_| (raw pointers into |events_|)
+// so their times can be clamped later by ClampNewEventsToCurrentTime.
+void AudioParamTimeline::InsertEvent(std::unique_ptr<ParamEvent> event,
+                                     ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  // Sanity check the event. Be super careful we're not getting infected with
+  // NaN or Inf. These should have been handled by the caller.
+  bool is_valid = event->GetType() < ParamEvent::kLastType &&
+                  std::isfinite(event->Value()) &&
+                  std::isfinite(event->Time()) &&
+                  std::isfinite(event->TimeConstant()) &&
+                  std::isfinite(event->Duration()) && event->Duration() >= 0;
+
+  DCHECK(is_valid);
+  if (!is_valid)
+    return;
+
+  unsigned i = 0;
+  double insert_time = event->Time();
+
+  if (!events_.size() &&
+      (event->GetType() == ParamEvent::kLinearRampToValue ||
+       event->GetType() == ParamEvent::kExponentialRampToValue)) {
+    // There are no events preceding these ramps. Insert a new
+    // setValueAtTime event to set the starting point for these
+    // events. Use a time of 0 to make sure it precedes all other
+    // events. This will get fixed when we handle new events.
+    events_.insert(0, AudioParamTimeline::ParamEvent::CreateSetValueEvent(
+                          event->InitialValue(), 0));
+    new_events_.insert(events_[0].get());
+  }
+
+  for (i = 0; i < events_.size(); ++i) {
+    if (event->GetType() == ParamEvent::kSetValueCurve) {
+      // If this event is a SetValueCurve, make sure it doesn't overlap any
+      // existing event. It's ok if the SetValueCurve starts at the same time as
+      // the end of some other duration.
+      double end_time = event->Time() + event->Duration();
+      ParamEvent::Type test_type = events_[i]->GetType();
+      // Events of type |kSetValueCurveEnd| or |kCancelValues| never
+      // conflict.
+      if (!(test_type == ParamEvent::kSetValueCurveEnd ||
+            test_type == ParamEvent::kCancelValues) &&
+          events_[i]->Time() > event->Time() && events_[i]->Time() < end_time) {
+        exception_state.ThrowDOMException(
+            kNotSupportedError,
+            EventToString(*event) + " overlaps " + EventToString(*events_[i]));
+        return;
+      }
+    } else {
+      // Otherwise, make sure this event doesn't overlap any existing
+      // SetValueCurve event.
+      if (events_[i]->GetType() == ParamEvent::kSetValueCurve) {
+        double end_time = events_[i]->Time() + events_[i]->Duration();
+        if (event->GetType() != ParamEvent::kSetValueCurveEnd &&
+            event->Time() >= events_[i]->Time() && event->Time() < end_time) {
+          exception_state.ThrowDOMException(
+              kNotSupportedError, EventToString(*event) + " overlaps " +
+                                      EventToString(*events_[i]));
+          return;
+        }
+      }
+    }
+
+    // Overwrite same event type and time.
+    if (events_[i]->Time() == insert_time &&
+        events_[i]->GetType() == event->GetType()) {
+      // Be sure to remove the old event from |new_events_| too, in
+      // case it was just added; its raw pointer is about to dangle.
+      if (new_events_.Contains(events_[i].get())) {
+        new_events_.erase(events_[i].get());
+      }
+      events_[i] = std::move(event);
+      new_events_.insert(events_[i].get());
+      return;
+    }
+
+    if (events_[i]->Time() > insert_time)
+      break;
+  }
+
+  // |i| is now the index of the first event strictly later than
+  // |insert_time| (or events_.size()); insert there to keep time order.
+  events_.insert(i, std::move(event));
+  new_events_.insert(events_[i].get());
+}
+
+// Returns true if the timeline has automation events that require
+// processing for the render quantum starting at |current_frame|.
+// Called from the audio thread; uses a try-lock so it never blocks.
+bool AudioParamTimeline::HasValues(size_t current_frame,
+                                   double sample_rate) const {
+  MutexTryLocker try_locker(events_lock_);
+
+  if (try_locker.Locked()) {
+    if (events_.size() == 0)
+      return false;
+
+    switch (events_[0]->GetType()) {
+      case ParamEvent::kSetValue:
+      case ParamEvent::kSetValueCurve:
+      case ParamEvent::kSetTarget:
+        // Need automation if the event starts somewhere before the
+        // end of the current render quantum.
+        return events_[0]->Time() <=
+               (current_frame + AudioUtilities::kRenderQuantumFrames) /
+                   sample_rate;
+      default:
+        // Otherwise, there's some kind of other event running, so we
+        // need to do automation.
+        return true;
+    }
+  }
+
+  // Can't get the lock so that means the main thread is trying to insert an
+  // event. Just return true then. If the main thread releases the lock before
+  // valueForContextTime or valuesForFrameRange runs, then the there will be an
+  // event on the timeline, so everything is fine. If the lock is held so that
+  // neither valueForContextTime nor valuesForFrameRange can run, this is ok
+  // too, because they have tryLocks to produce a default value. The event will
+  // then get processed in the next rendering quantum.
+  //
+  // Don't want to return false here because that would confuse the processing
+  // of the timeline if previously we returned true and now suddenly return
+  // false, only to return true on the next rendering quantum. Currently, once
+  // a timeline has been introduced it is always true forever because m_events
+  // never shrinks.
+  return true;
+}
+
+// Remove every event whose time is at or after |start_time|
+// (cancelScheduledValues). Main-thread only.
+// NOTE(review): |exception_state| is currently unused here — presumably
+// kept for signature symmetry with the other automation methods.
+void AudioParamTimeline::CancelScheduledValues(
+    double start_time,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  MutexLocker locker(events_lock_);
+
+  // Remove all events starting at startTime. Events are sorted by time,
+  // so the first match marks the start of the suffix to remove.
+  for (unsigned i = 0; i < events_.size(); ++i) {
+    if (events_[i]->Time() >= start_time) {
+      RemoveCancelledEvents(i);
+      break;
+    }
+  }
+}
+
+// Implement cancelAndHoldAtTime: remove events at or after |cancel_time|
+// but hold the parameter at the value it would have had at that instant.
+// Depending on the event straddling |cancel_time| this may insert a
+// CancelValues event (ramps, SetTarget) or a truncated SetValueCurve
+// plus a SetValue at its new end. Main-thread only.
+void AudioParamTimeline::CancelAndHoldAtTime(double cancel_time,
+                                             ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (!IsNonNegativeAudioParamTime(cancel_time, exception_state))
+    return;
+
+  MutexLocker locker(events_lock_);
+
+  unsigned i;
+  // Find the first event at or just past cancelTime.
+  for (i = 0; i < events_.size(); ++i) {
+    if (events_[i]->Time() > cancel_time) {
+      break;
+    }
+  }
+
+  // The event that is being cancelled. This is the event just past
+  // cancelTime, if any.
+  unsigned cancelled_event_index = i;
+
+  // If the event just before cancelTime is a SetTarget or SetValueCurve
+  // event, we need to handle that event specially instead of the event after.
+  if (i > 0 && ((events_[i - 1]->GetType() == ParamEvent::kSetTarget) ||
+                (events_[i - 1]->GetType() == ParamEvent::kSetValueCurve))) {
+    cancelled_event_index = i - 1;
+  } else if (i >= events_.size()) {
+    // If there were no events occurring after |cancelTime| (and the
+    // previous event is not SetTarget or SetValueCurve, we're done.
+    return;
+  }
+
+  // cancelledEvent is the event that is being cancelled.
+  ParamEvent* cancelled_event = events_[cancelled_event_index].get();
+  ParamEvent::Type event_type = cancelled_event->GetType();
+
+  // New event to be inserted, if any, and a SetValueEvent if needed.
+  std::unique_ptr<ParamEvent> new_event = nullptr;
+  std::unique_ptr<ParamEvent> new_set_value_event = nullptr;
+
+  switch (event_type) {
+    case ParamEvent::kLinearRampToValue:
+    case ParamEvent::kExponentialRampToValue: {
+      // For these events we need to remember the parameters of the event
+      // for a CancelValues event so that we can properly cancel the event
+      // and hold the value.
+      std::unique_ptr<ParamEvent> saved_event = ParamEvent::CreateGeneralEvent(
+          event_type, cancelled_event->Value(), cancelled_event->Time(),
+          cancelled_event->InitialValue(), cancelled_event->CallTime(),
+          cancelled_event->TimeConstant(), cancelled_event->Duration(),
+          cancelled_event->Curve(), cancelled_event->CurvePointsPerSecond(),
+          cancelled_event->CurveEndValue(), nullptr);
+
+      new_event = ParamEvent::CreateCancelValuesEvent(cancel_time,
+                                                      std::move(saved_event));
+    } break;
+    case ParamEvent::kSetTarget: {
+      // Don't want to remove the SetTarget event, so bump the index. But
+      // we do want to insert a cancelEvent so that we stop this
+      // automation and hold the value when we get there.
+      ++cancelled_event_index;
+
+      new_event = ParamEvent::CreateCancelValuesEvent(cancel_time, nullptr);
+    } break;
+    case ParamEvent::kSetValueCurve: {
+      double new_duration = cancel_time - cancelled_event->Time();
+
+      if (cancel_time > cancelled_event->Time() + cancelled_event->Duration()) {
+        // If the cancellation time is past the end of the curve,
+        // there's nothing to do except remove the following events.
+        ++cancelled_event_index;
+      } else {
+        // Cancellation time is in the middle of the curve. Therefore,
+        // create a new SetValueCurve event with the appropriate new
+        // parameters to cancel this event properly. Since it's illegal
+        // to insert any event within a SetValueCurve event, we can
+        // compute the new end value now instead of doing when running
+        // the timeline.
+        float end_value = ValueCurveAtTime(
+            cancel_time, cancelled_event->Time(), cancelled_event->Duration(),
+            cancelled_event->Curve().data(), cancelled_event->Curve().size());
+
+        // Replace the existing SetValueCurve with this new one that is
+        // identical except for the duration.
+        new_event = ParamEvent::CreateGeneralEvent(
+            event_type, cancelled_event->Value(), cancelled_event->Time(),
+            cancelled_event->InitialValue(), cancelled_event->CallTime(),
+            cancelled_event->TimeConstant(), new_duration,
+            cancelled_event->Curve(), cancelled_event->CurvePointsPerSecond(),
+            end_value, nullptr);
+
+        new_set_value_event = ParamEvent::CreateSetValueEvent(
+            end_value, cancelled_event->Time() + new_duration);
+      }
+    } break;
+    case ParamEvent::kSetValue:
+    case ParamEvent::kSetValueCurveEnd:
+    case ParamEvent::kCancelValues:
+      // Nothing needs to be done for a SetValue or CancelValues event.
+      break;
+    case ParamEvent::kLastType:
+      NOTREACHED();
+      break;
+  }
+
+  // Now remove all the following events from the timeline.
+  if (cancelled_event_index < events_.size()) {
+    RemoveCancelledEvents(cancelled_event_index);
+  }
+
+  // Insert the new event, if any.
+  if (new_event) {
+    InsertEvent(std::move(new_event), exception_state);
+    if (new_set_value_event)
+      InsertEvent(std::move(new_set_value_event), exception_state);
+  }
+}
+
+// Compute the parameter value at the destination's current time.
+// Sets |has_value| to false (and returns |default_value|) when the lock
+// is contended, there are no events, or the first event is still in the
+// future; otherwise delegates to ValuesForFrameRange for a single frame.
+float AudioParamTimeline::ValueForContextTime(
+    AudioDestinationHandler& audio_destination,
+    float default_value,
+    bool& has_value,
+    float min_value,
+    float max_value) {
+  {
+    // Scoped so the try-lock is released before ValuesForFrameRange
+    // takes its own try-lock below.
+    MutexTryLocker try_locker(events_lock_);
+    if (!try_locker.Locked() || !events_.size() ||
+        audio_destination.CurrentTime() < events_[0]->Time()) {
+      has_value = false;
+      return default_value;
+    }
+  }
+
+  // Ask for just a single value.
+  float value;
+  double sample_rate = audio_destination.SampleRate();
+  size_t start_frame = audio_destination.CurrentSampleFrame();
+  // One parameter change per render quantum.
+  double control_rate = sample_rate / AudioUtilities::kRenderQuantumFrames;
+  value =
+      ValuesForFrameRange(start_frame, start_frame + 1, default_value, &value,
+                          1, sample_rate, control_rate, min_value, max_value);
+
+  has_value = true;
+  return value;
+}
+
+// Fill |values| with the automation values for frames
+// [start_frame, end_frame), clamped to [min_value, max_value], and
+// return the last value. Audio-thread entry point: uses a try-lock and
+// falls back to |default_value| everywhere if the main thread holds the
+// lock. NOTE(review): the clamp loop dereferences |values|
+// unconditionally; callers are expected to pass a non-null buffer
+// (ValuesForFrameRangeImpl DCHECKs this) — confirm against callers.
+float AudioParamTimeline::ValuesForFrameRange(size_t start_frame,
+                                              size_t end_frame,
+                                              float default_value,
+                                              float* values,
+                                              unsigned number_of_values,
+                                              double sample_rate,
+                                              double control_rate,
+                                              float min_value,
+                                              float max_value) {
+  // We can't contend the lock in the realtime audio thread.
+  MutexTryLocker try_locker(events_lock_);
+  if (!try_locker.Locked()) {
+    if (values) {
+      for (unsigned i = 0; i < number_of_values; ++i)
+        values[i] = default_value;
+    }
+    return default_value;
+  }
+
+  float last_value =
+      ValuesForFrameRangeImpl(start_frame, end_frame, default_value, values,
+                              number_of_values, sample_rate, control_rate);
+
+  // Clamp the values now to the nominal range
+  for (unsigned k = 0; k < number_of_values; ++k)
+    values[k] = clampTo(values[k], min_value, max_value);
+
+  return last_value;
+}
+
+// Core timeline renderer: walk the (time-sorted) event list and write
+// the automation values for frames [start_frame, end_frame) into
+// |values|. Caller must hold |events_lock_|. Returns the last computed
+// value (used to update the AudioParam's .value attribute).
+float AudioParamTimeline::ValuesForFrameRangeImpl(size_t start_frame,
+                                                  size_t end_frame,
+                                                  float default_value,
+                                                  float* values,
+                                                  unsigned number_of_values,
+                                                  double sample_rate,
+                                                  double control_rate) {
+  DCHECK(values);
+  DCHECK_GE(number_of_values, 1u);
+  if (!values || !(number_of_values >= 1))
+    return default_value;
+
+  // Return default value if there are no events matching the desired time
+  // range.
+  if (!events_.size() || (end_frame / sample_rate <= events_[0]->Time())) {
+    FillWithDefault(values, default_value, number_of_values, 0);
+
+    return default_value;
+  }
+
+  int number_of_events = events_.size();
+
+  // MUST clamp event before |events_| is possibly mutated because
+  // |new_events_| has raw pointers to objects in |events_|. Clamping
+  // will clear out all of these pointers before |events_| is
+  // potentially modified.
+  //
+  // TODO(rtoy): Consider making |events_| be scoped_refptr instead of
+  // unique_ptr.
+  if (new_events_.size() > 0) {
+    ClampNewEventsToCurrentTime(start_frame / sample_rate);
+  }
+
+  if (number_of_events > 0) {
+    double current_time = start_frame / sample_rate;
+
+    // Fast path: if every event has finished, fill with the final value
+    // and clear the timeline.
+    if (HandleAllEventsInThePast(current_time, sample_rate, default_value,
+                                 number_of_values, values))
+      return default_value;
+  }
+
+  // Maintain a running time (frame) and index for writing the values buffer.
+  size_t current_frame = start_frame;
+  unsigned write_index = 0;
+
+  // If first event is after startFrame then fill initial part of values buffer
+  // with defaultValue until we reach the first event time.
+  std::tie(current_frame, write_index) =
+      HandleFirstEvent(values, default_value, number_of_values, start_frame,
+                       end_frame, sample_rate, current_frame, write_index);
+
+  float value = default_value;
+
+  // Go through each event and render the value buffer where the times overlap,
+  // stopping when we've rendered all the requested values.
+  int last_skipped_event_index = 0;
+  for (int i = 0; i < number_of_events && write_index < number_of_values; ++i) {
+    ParamEvent* event = events_[i].get();
+    ParamEvent* next_event =
+        i < number_of_events - 1 ? events_[i + 1].get() : nullptr;
+
+    // Wait until we get a more recent event.
+    if (!IsEventCurrent(event, next_event, current_frame, sample_rate)) {
+      // This is not the special SetValue event case, and nextEvent is
+      // in the past. We can skip processing of this event since it's
+      // in past. We keep track of this event in lastSkippedEventIndex
+      // to note what events we've skipped.
+      last_skipped_event_index = i;
+      continue;
+    }
+
+    // If there's no next event, set nextEventType to LastType to indicate that.
+    ParamEvent::Type next_event_type =
+        next_event ? static_cast<ParamEvent::Type>(next_event->GetType())
+                   : ParamEvent::kLastType;
+
+    // A SetTarget followed by a ramp needs the ramp to start at the
+    // value SetTarget has reached; this may replace events_[i] and
+    // update |event| and |value|.
+    ProcessSetTargetFollowedByRamp(i, event, next_event_type, current_frame,
+                                   sample_rate, control_rate, value);
+
+    float value1 = event->Value();
+    double time1 = event->Time();
+
+    float value2 = next_event ? next_event->Value() : value1;
+    double time2 =
+        next_event ? next_event->Time() : end_frame / sample_rate + 1;
+
+    // Check to see if an event was cancelled.
+    std::tie(value2, time2, next_event_type) =
+        HandleCancelValues(event, next_event, value2, time2);
+
+    DCHECK_GE(time2, time1);
+
+    // |fillToEndFrame| is the exclusive upper bound of the last frame to be
+    // computed for this event. It's either the last desired frame (|endFrame|)
+    // or derived from the end time of the next event (time2). We compute
+    // ceil(time2*sampleRate) because fillToEndFrame is the exclusive upper
+    // bound. Consider the case where |startFrame| = 128 and time2 = 128.1
+    // (assuming sampleRate = 1). Since time2 is greater than 128, we want to
+    // output a value for frame 128. This requires that fillToEndFrame be at
+    // least 129. This is achieved by ceil(time2).
+    //
+    // However, time2 can be very large, so compute this carefully in the case
+    // where time2 exceeds the size of a size_t.
+
+    size_t fill_to_end_frame = end_frame;
+    if (end_frame > time2 * sample_rate)
+      fill_to_end_frame = static_cast<size_t>(ceil(time2 * sample_rate));
+
+    DCHECK_GE(fill_to_end_frame, start_frame);
+    size_t fill_to_frame = fill_to_end_frame - start_frame;
+    fill_to_frame =
+        std::min(fill_to_frame, static_cast<size_t>(number_of_values));
+
+    // Snapshot of the current segment's parameters handed to the
+    // per-event-type Process* helpers.
+    const AutomationState current_state = {
+        number_of_values,
+        start_frame,
+        end_frame,
+        sample_rate,
+        control_rate,
+        fill_to_frame,
+        fill_to_end_frame,
+        value1,
+        time1,
+        value2,
+        time2,
+        event,
+        i,
+    };
+
+    // First handle linear and exponential ramps which require looking ahead to
+    // the next event.
+    if (next_event_type == ParamEvent::kLinearRampToValue) {
+      std::tie(current_frame, value, write_index) = ProcessLinearRamp(
+          current_state, values, current_frame, value, write_index);
+    } else if (next_event_type == ParamEvent::kExponentialRampToValue) {
+      std::tie(current_frame, value, write_index) = ProcessExponentialRamp(
+          current_state, values, current_frame, value, write_index);
+    } else {
+      // Handle event types not requiring looking ahead to the next event.
+      switch (event->GetType()) {
+        case ParamEvent::kSetValue:
+        case ParamEvent::kSetValueCurveEnd:
+        case ParamEvent::kLinearRampToValue: {
+          current_frame = fill_to_end_frame;
+
+          // Simply stay at a constant value.
+          value = event->Value();
+          write_index =
+              FillWithDefault(values, value, fill_to_frame, write_index);
+
+          break;
+        }
+
+        case ParamEvent::kCancelValues: {
+          std::tie(current_frame, value, write_index) = ProcessCancelValues(
+              current_state, values, current_frame, value, write_index);
+          break;
+        }
+
+        case ParamEvent::kExponentialRampToValue: {
+          current_frame = fill_to_end_frame;
+
+          // If we're here, we've reached the end of the ramp. If we can
+          // (because the start and end values have the same sign, and neither
+          // is 0), use the actual end value. If not, we have to propagate
+          // whatever we have.
+          if (i >= 1 && ((events_[i - 1]->Value() * event->Value()) > 0))
+            value = event->Value();
+
+          // Simply stay at a constant value from the last time. We don't want
+          // to use the value of the event in case value1 * value2 < 0. In this
+          // case we should propagate the previous value, which is in |value|.
+          write_index =
+              FillWithDefault(values, value, fill_to_frame, write_index);
+
+          break;
+        }
+
+        case ParamEvent::kSetTarget: {
+          std::tie(current_frame, value, write_index) = ProcessSetTarget(
+              current_state, values, current_frame, value, write_index);
+          break;
+        }
+
+        case ParamEvent::kSetValueCurve: {
+          std::tie(current_frame, value, write_index) = ProcessSetValueCurve(
+              current_state, values, current_frame, value, write_index);
+          break;
+        }
+        case ParamEvent::kLastType:
+          NOTREACHED();
+          break;
+      }
+    }
+  }
+
+  // If we skipped over any events (because they are in the past), we can
+  // remove them so we don't have to check them ever again. (This MUST be
+  // running with the m_events lock so we can safely modify the m_events
+  // array.)
+  if (last_skipped_event_index > 0) {
+    // |new_events_| should be empty here so we don't have to
+    // do any updates due to this mutation of |events_|.
+    DCHECK_EQ(new_events_.size(), 0u);
+    events_.EraseAt(0, last_skipped_event_index - 1);
+  }
+
+  // If there's any time left after processing the last event then just
+  // propagate the last value to the end of the values buffer.
+  write_index = FillWithDefault(values, value, number_of_values, write_index);
+
+  // This value is used to set the .value attribute of the AudioParam. it
+  // should be the last computed value.
+  return values[number_of_values - 1];
+}
+
+// If the first event starts after |start_frame|, pre-fill the beginning
+// of |values| with |default_value| up to the first event's frame.
+// Returns the updated (current_frame, write_index) pair.
+std::tuple<size_t, unsigned> AudioParamTimeline::HandleFirstEvent(
+    float* values,
+    float default_value,
+    unsigned number_of_values,
+    size_t start_frame,
+    size_t end_frame,
+    double sample_rate,
+    size_t current_frame,
+    unsigned write_index) {
+  double first_event_time = events_[0]->Time();
+  if (first_event_time > start_frame / sample_rate) {
+    // |fillToFrame| is an exclusive upper bound, so use ceil() to compute the
+    // bound from the firstEventTime.
+    size_t fill_to_frame = end_frame;
+    double first_event_frame = ceil(first_event_time * sample_rate);
+    if (end_frame > first_event_frame)
+      fill_to_frame = static_cast<size_t>(first_event_frame);
+    DCHECK_GE(fill_to_frame, start_frame);
+
+    // Convert from an absolute frame to an offset into |values|, capped
+    // at the buffer length.
+    fill_to_frame -= start_frame;
+    fill_to_frame =
+        std::min(fill_to_frame, static_cast<size_t>(number_of_values));
+    write_index =
+        FillWithDefault(values, default_value, fill_to_frame, write_index);
+
+    current_frame += fill_to_frame;
+  }
+
+  return std::make_tuple(current_frame, write_index);
+}
+
+// Returns false when |event| is entirely in the past (its successor's
+// time precedes |current_frame|) and can therefore be skipped — except
+// for a SetValue/SetValueCurveEnd event landing within the last frame,
+// which must still be applied.
+bool AudioParamTimeline::IsEventCurrent(const ParamEvent* event,
+                                        const ParamEvent* next_event,
+                                        size_t current_frame,
+                                        double sample_rate) const {
+  // WARNING: due to round-off it might happen that nextEvent->time() is
+  // just larger than currentFrame/sampleRate. This means that we will end
+  // up running the |event| again. The code below had better be prepared
+  // for this case! What should happen is the fillToFrame should be 0 so
+  // that while the event is actually run again, nothing actually gets
+  // computed, and we move on to the next event.
+  //
+  // An example of this case is setValueCurveAtTime. The time at which
+  // setValueCurveAtTime ends (and the setValueAtTime begins) might be
+  // just past currentTime/sampleRate. Then setValueCurveAtTime will be
+  // processed again before advancing to setValueAtTime. The number of
+  // frames to be processed should be zero in this case.
+  if (next_event && next_event->Time() < current_frame / sample_rate) {
+    // But if the current event is a SetValue event and the event time is
+    // between currentFrame - 1 and curentFrame (in time). we don't want to
+    // skip it. If we do skip it, the SetValue event is completely skipped
+    // and not applied, which is wrong. Other events don't have this problem.
+    // (Because currentFrame is unsigned, we do the time check in this funny,
+    // but equivalent way.)
+    double event_frame = event->Time() * sample_rate;
+
+    // Condition is currentFrame - 1 < eventFrame <= currentFrame, but
+    // currentFrame is unsigned and could be 0, so use
+    // currentFrame < eventFrame + 1 instead.
+    if (!(((event->GetType() == ParamEvent::kSetValue ||
+            event->GetType() == ParamEvent::kSetValueCurveEnd) &&
+           (event_frame <= current_frame) &&
+           (current_frame < event_frame + 1)))) {
+      // This is not the special SetValue event case, and nextEvent is
+      // in the past. We can skip processing of this event since it's
+      // in past.
+      return false;
+    }
+  }
+  return true;
+}
+
+// Clamp the time of every newly inserted event to |current_time| so no
+// event appears to start in the past, then clear |new_events_| (which
+// holds raw pointers into |events_| and must be emptied before |events_|
+// is mutated elsewhere). Re-sorts stably if any time was changed.
+void AudioParamTimeline::ClampNewEventsToCurrentTime(double current_time) {
+  bool clamped_some_event_time = false;
+
+  for (auto event : new_events_) {
+    if (event->Time() < current_time) {
+      event->SetTime(current_time);
+      clamped_some_event_time = true;
+    }
+  }
+
+  if (clamped_some_event_time) {
+    // If we clamped some event time to current time, we need to sort
+    // the event list in time order again, but it must be stable!
+    std::stable_sort(events_.begin(), events_.end(), ParamEvent::EventPreceeds);
+  }
+
+  new_events_.clear();
+}
+
+// Test that for a SetTarget event, the current value is close enough
+// to the target value that we can consider the event to have
+// converged to the target. |discrete_time_constant| is the per-step
+// smoothing factor c below; the thresholds kSetTargetThreshold and
+// kSetTargetZeroThreshold are defined elsewhere in this file.
+static bool HasSetTargetConverged(float value,
+                                  float target,
+                                  float discrete_time_constant) {
+  // Let c = |discrete_time_constant|. Then SetTarget computes
+  //
+  //   new value = value + (target - value) * c
+  //             = value * (1 + (target - value)*c/value)
+  //
+  // We consider the value converged if (target - value) * c is
+  // sufficiently small so as not to change value. This happens if
+  // (target-value)*c/value is a small value, say, eps. Thus, we've converged
+  // if
+  //
+  //   |(target-value)*c/value| < eps
+  // or
+  //   |target-value|*c < eps*|value|
+  //
+  // However, if target is zero, we need to be careful:
+  //
+  //   new value = value + (0 - value) * c
+  //             = value * (1 - c)
+  //
+  // So the new value is sufficiently close to zero if |value|*(1-c)
+  // is close enough to zero.
+  return fabs(target - value) * discrete_time_constant <
+             kSetTargetThreshold * fabs(value) ||
+         (target == 0 && fabs(value) < kSetTargetZeroThreshold);
+}
+
+// Fast path for when the last event finished well before |current_time|:
+// fill |values| with the final value, update |default_value| (in/out) to
+// that value, clear the whole timeline, and return true. Returns false
+// if the timeline still needs processing (e.g. an unconverged SetTarget).
+bool AudioParamTimeline::HandleAllEventsInThePast(double current_time,
+                                                  double sample_rate,
+                                                  float& default_value,
+                                                  unsigned number_of_values,
+                                                  float* values) {
+  // Optimize the case where the last event is in the past.
+  ParamEvent* last_event = events_[events_.size() - 1].get();
+  ParamEvent::Type last_event_type = last_event->GetType();
+  double last_event_time = last_event->Time();
+
+  // If the last event is in the past and the event has ended, then we can
+  // just propagate the same value. Except for SetTarget which lasts
+  // "forever". SetValueCurve also has an explicit SetValue at the end of
+  // the curve, so we don't need to worry that SetValueCurve time is a
+  // start time, not an end time. (The 1.5-render-quantum margin guards
+  // against events near the boundary of the current quantum.)
+  if (last_event_time +
+          1.5 * AudioUtilities::kRenderQuantumFrames / sample_rate <
+      current_time) {
+    // If the last event is SetTarget, make sure we've converged and, that
+    // we're at least 5 time constants past the start of the event. If not, we
+    // have to continue processing it.
+    if (last_event_type == ParamEvent::kSetTarget) {
+      float discrete_time_constant =
+          static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
+              last_event->TimeConstant(), sample_rate));
+      if (HasSetTargetConverged(default_value, last_event->Value(),
+                                discrete_time_constant) &&
+          current_time > last_event_time + 5 * last_event->TimeConstant()) {
+        // We've converged. Slam the default value with the target value.
+        default_value = last_event->Value();
+      } else {
+        // Not converged, so give up; we can't remove this event yet.
+        return false;
+      }
+    }
+
+    // |events_| is being mutated. |new_events_| better be empty because there
+    // are raw pointers there.
+    DCHECK_EQ(new_events_.size(), 0U);
+    // The event has finished, so just copy the default value out.
+    // Since all events are now also in the past, we can just remove all
+    // timeline events too because |defaultValue| has the expected
+    // value.
+    FillWithDefault(values, default_value, number_of_values, 0);
+    smoothed_value_ = default_value;
+    events_.clear();
+    return true;
+  }
+
+  return false;
+}
+
+// When a SetTarget event is immediately followed by a ramp, replace the
+// SetTarget (in place, at |event_index|) with a SetValue event at the
+// current frame so the ramp starts from wherever the SetTarget has
+// reached. Updates |event| and |value| (both in/out) accordingly; no-op
+// for any other event combination.
+void AudioParamTimeline::ProcessSetTargetFollowedByRamp(
+    int event_index,
+    ParamEvent*& event,
+    ParamEvent::Type next_event_type,
+    size_t current_frame,
+    double sample_rate,
+    double control_rate,
+    float& value) {
+  // If the current event is SetTarget and the next event is a
+  // LinearRampToValue or ExponentialRampToValue, special handling is needed.
+  // In this case, the linear and exponential ramp should start at wherever
+  // the SetTarget processing has reached.
+  if (event->GetType() == ParamEvent::kSetTarget &&
+      (next_event_type == ParamEvent::kLinearRampToValue ||
+       next_event_type == ParamEvent::kExponentialRampToValue)) {
+    // Replace the SetTarget with a SetValue to set the starting time and
+    // value for the ramp using the current frame. We need to update |value|
+    // appropriately depending on whether the ramp has started or not.
+    //
+    // If SetTarget starts somewhere between currentFrame - 1 and
+    // currentFrame, we directly compute the value it would have at
+    // currentFrame. If not, we update the value from the value from
+    // currentFrame - 1.
+    //
+    // Can't use the condition currentFrame - 1 <= t0 * sampleRate <=
+    // currentFrame because currentFrame is unsigned and could be 0. Instead,
+    // compute the condition this way,
+    // where f = currentFrame and Fs = sampleRate:
+    //
+    //   f - 1 <= t0 * Fs <= f
+    //   2 * f - 2 <= 2 * Fs * t0 <= 2 * f
+    //   -2 <= 2 * Fs * t0 - 2 * f <= 0
+    //   -1 <= 2 * Fs * t0 - 2 * f + 1 <= 1
+    //   abs(2 * Fs * t0 - 2 * f + 1) <= 1
+    if (fabs(2 * sample_rate * event->Time() - 2 * current_frame + 1) <= 1) {
+      // SetTarget is starting somewhere between currentFrame - 1 and
+      // currentFrame. Compute the value the SetTarget would have at the
+      // currentFrame.
+      value = event->Value() +
+              (value - event->Value()) *
+                  exp(-(current_frame / sample_rate - event->Time()) /
+                      event->TimeConstant());
+    } else {
+      // SetTarget has already started. Update |value| one frame because it's
+      // the value from the previous frame.
+      float discrete_time_constant =
+          static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
+              event->TimeConstant(), control_rate));
+      value += (event->Value() - value) * discrete_time_constant;
+    }
+
+    // Insert a SetValueEvent to mark the starting value and time.
+    // Clear the clamp check because this doesn't need it.
+    events_[event_index] =
+        ParamEvent::CreateSetValueEvent(value, current_frame / sample_rate);
+
+    // Update our pointer to the current event because we just changed it.
+    event = events_[event_index].get();
+  }
+}
+
+// Handles the case where the event following |current_event| is a
+// kCancelValues event.  Computes the value and time at which the cancelled
+// automation should stop and the event type to emulate up to that point.
+//
+// Returns the tuple (value2, time2, next_event_type).  When the next event
+// is not a CancelValues event, the inputs are passed through unchanged
+// (with next_event_type taken from |next_event|, or kLastType when there is
+// no next event).
+std::tuple<float, double, AudioParamTimeline::ParamEvent::Type>
+AudioParamTimeline::HandleCancelValues(const ParamEvent* current_event,
+                                       ParamEvent* next_event,
+                                       float value2,
+                                       double time2) {
+  DCHECK(current_event);
+
+  ParamEvent::Type next_event_type =
+      next_event ? next_event->GetType() : ParamEvent::kLastType;
+
+  if (next_event && next_event->GetType() == ParamEvent::kCancelValues) {
+    float value1 = current_event->Value();
+    double time1 = current_event->Time();
+
+    switch (current_event->GetType()) {
+      case ParamEvent::kLinearRampToValue:
+      case ParamEvent::kExponentialRampToValue:
+      case ParamEvent::kSetValueCurveEnd:
+      case ParamEvent::kSetValue: {
+        // These events potentially establish a starting value for
+        // the following event, so we need to examine the cancelled
+        // event to see what to do.
+        const ParamEvent* saved_event = next_event->SavedEvent();
+
+        // Update the end time and type to pretend that we're running
+        // this saved event type.
+        time2 = next_event->Time();
+        next_event_type = saved_event->GetType();
+
+        if (next_event->HasDefaultCancelledValue()) {
+          // We've already established a value for the cancelled
+          // event, so just return it.
+          value2 = next_event->Value();
+        } else {
+          // If the next event would have been a LinearRamp or
+          // ExponentialRamp, we need to compute a new end value for
+          // the event so that the curve continues as if it were
+          // not cancelled.
+          switch (saved_event->GetType()) {
+            case ParamEvent::kLinearRampToValue:
+              value2 =
+                  LinearRampAtTime(next_event->Time(), value1, time1,
+                                   saved_event->Value(), saved_event->Time());
+              break;
+            case ParamEvent::kExponentialRampToValue:
+              value2 = ExponentialRampAtTime(next_event->Time(), value1, time1,
+                                             saved_event->Value(),
+                                             saved_event->Time());
+              break;
+            case ParamEvent::kSetValueCurve:
+            case ParamEvent::kSetValueCurveEnd:
+            case ParamEvent::kSetValue:
+            case ParamEvent::kSetTarget:
+            case ParamEvent::kCancelValues:
+              // These cannot be possible types for the saved event
+              // because they can't be created.
+              // createCancelValuesEvent doesn't allow them (SetValue,
+              // SetTarget, CancelValues) or cancelScheduledValues()
+              // doesn't create such an event (SetValueCurve).
+              NOTREACHED();
+              break;
+            case ParamEvent::kLastType:
+              // Illegal event type.
+              NOTREACHED();
+              break;
+          }
+
+          // Cache the new value so we don't keep computing it over and over.
+          next_event->SetCancelledValue(value2);
+        }
+      } break;
+      case ParamEvent::kSetValueCurve:
+        // Everything needed for this was handled when cancelling was
+        // done.
+        break;
+      case ParamEvent::kSetTarget:
+      case ParamEvent::kCancelValues:
+        // Nothing special needs to be done for SetTarget or
+        // CancelValues followed by CancelValues.
+        break;
+      case ParamEvent::kLastType:
+        NOTREACHED();
+        break;
+    }
+  }
+
+  return std::make_tuple(value2, time2, next_event_type);
+}
+
+// Fills |values| with a linear ramp from (time1, value1) to (time2, value2),
+// both taken from |current_state|, starting at |write_index| and stopping at
+// fill_to_frame.  On x86 the bulk of the ramp is computed four samples at a
+// time with SSE; a scalar loop handles the remainder.
+//
+// Returns the updated (current_frame, value, write_index).
+std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessLinearRamp(
+    const AutomationState& current_state,
+    float* values,
+    size_t current_frame,
+    float value,
+    unsigned write_index) {
+#if defined(ARCH_CPU_X86_FAMILY)
+  auto number_of_values = current_state.number_of_values;
+#endif
+  auto fill_to_frame = current_state.fill_to_frame;
+  auto time1 = current_state.time1;
+  auto time2 = current_state.time2;
+  auto value1 = current_state.value1;
+  auto value2 = current_state.value2;
+  auto sample_rate = current_state.sample_rate;
+
+  // Slope scale factor; zero when the ramp has no (or negative) duration.
+  double delta_time = time2 - time1;
+  float k = delta_time > 0 ? 1 / delta_time : 0;
+  const float value_delta = value2 - value1;
+#if defined(ARCH_CPU_X86_FAMILY)
+  if (fill_to_frame > write_index) {
+    // Minimize in-loop operations. Calculate starting value and increment.
+    // Next step: value += inc.
+    //  value = value1 +
+    //      (currentFrame/sampleRate - time1) * k * (value2 - value1);
+    //  inc = 4 / sampleRate * k * (value2 - value1);
+    // Resolve recursion by expanding constants to achieve a 4-step loop
+    // unrolling.
+    //  value = value1 +
+    //    ((currentFrame/sampleRate - time1) + i * sampleFrameTimeIncr) * k
+    //    * (value2 -value1), i in 0..3
+    __m128 v_value =
+        _mm_mul_ps(_mm_set_ps1(1 / sample_rate), _mm_set_ps(3, 2, 1, 0));
+    v_value =
+        _mm_add_ps(v_value, _mm_set_ps1(current_frame / sample_rate - time1));
+    v_value = _mm_mul_ps(v_value, _mm_set_ps1(k * value_delta));
+    v_value = _mm_add_ps(v_value, _mm_set_ps1(value1));
+    __m128 v_inc = _mm_set_ps1(4 / sample_rate * k * value_delta);
+
+    // Truncate loop steps to multiple of 4.
+    unsigned fill_to_frame_trunc =
+        write_index + ((fill_to_frame - write_index) / 4) * 4;
+    // Compute final time.
+    DCHECK_LE(fill_to_frame_trunc, number_of_values);
+    current_frame += fill_to_frame_trunc - write_index;
+
+    // Process 4 loop steps.
+    for (; write_index < fill_to_frame_trunc; write_index += 4) {
+      _mm_storeu_ps(values + write_index, v_value);
+      v_value = _mm_add_ps(v_value, v_inc);
+    }
+  }
+  // Update |value| with the last value computed so that the
+  // .value attribute of the AudioParam gets the correct linear
+  // ramp value, in case the following loop doesn't execute.
+  if (write_index >= 1)
+    value = values[write_index - 1];
+#endif
+  // Serially process remaining values.
+  for (; write_index < fill_to_frame; ++write_index) {
+    float x = (current_frame / sample_rate - time1) * k;
+    // value = (1 - x) * value1 + x * value2;
+    value = value1 + x * value_delta;
+    values[write_index] = value;
+    ++current_frame;
+  }
+
+  return std::make_tuple(current_frame, value, write_index);
+}
+
+// Fills |values| with an exponential ramp from (time1, value1) to
+// (time2, value2), both taken from |current_state|, starting at
+// |write_index| and stopping at fill_to_frame.  If value1 and value2 have
+// opposite signs (or either is zero) the exponential is undefined, so the
+// previous |value| is propagated instead.
+//
+// Returns the updated (current_frame, value, write_index).
+std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessExponentialRamp(
+    const AutomationState& current_state,
+    float* values,
+    size_t current_frame,
+    float value,
+    unsigned write_index) {
+  auto fill_to_frame = current_state.fill_to_frame;
+  auto time1 = current_state.time1;
+  auto time2 = current_state.time2;
+  auto value1 = current_state.value1;
+  auto value2 = current_state.value2;
+  auto sample_rate = current_state.sample_rate;
+
+  if (value1 * value2 <= 0) {
+    // It's an error if value1 and value2 have opposite signs or if one of
+    // them is zero. Handle this by propagating the previous value, and
+    // making it the default.
+    value = value1;
+
+    for (; write_index < fill_to_frame; ++write_index)
+      values[write_index] = value;
+  } else {
+    double delta_time = time2 - time1;
+    double num_sample_frames = delta_time * sample_rate;
+    // The value goes exponentially from value1 to value2 in a duration of
+    // deltaTime seconds according to
+    //
+    //  v(t) = v1*(v2/v1)^((t-t1)/(t2-t1))
+    //
+    // Let c be currentFrame and F be the sampleRate.  Then we want to
+    // sample v(t) at times t = (c + k)/F for k = 0, 1, ...:
+    //
+    //   v((c+k)/F) = v1*(v2/v1)^(((c/F+k/F)-t1)/(t2-t1))
+    //              = v1*(v2/v1)^((c/F-t1)/(t2-t1))
+    //                  *(v2/v1)^((k/F)/(t2-t1))
+    //              = v1*(v2/v1)^((c/F-t1)/(t2-t1))
+    //                  *[(v2/v1)^(1/(F*(t2-t1)))]^k
+    //
+    // Thus, this can be written as
+    //
+    //   v((c+k)/F) = V*m^k
+    //
+    // where
+    //   V = v1*(v2/v1)^((c/F-t1)/(t2-t1))
+    //   m = (v2/v1)^(1/(F*(t2-t1)))
+
+    // Compute the per-sample multiplier.
+    float multiplier = powf(value2 / value1, 1 / num_sample_frames);
+    // Set the starting value of the exponential ramp.  Do not attempt
+    // to optimize pow to powf.  See crbug.com/771306.
+    value = value1 * pow(value2 / static_cast<double>(value1),
+                         (current_frame / sample_rate - time1) / delta_time);
+    for (; write_index < fill_to_frame; ++write_index) {
+      values[write_index] = value;
+      value *= multiplier;
+      ++current_frame;
+    }
+    // |value| got updated one extra time in the above loop.  Restore it to
+    // the last computed value.
+    if (write_index >= 1)
+      value /= multiplier;
+
+    // Due to roundoff it's possible that value exceeds value2.  Clip value
+    // to value2 if we are within 1/2 frame of time2.
+    if (current_frame > time2 * sample_rate - 0.5)
+      value = value2;
+  }
+
+  return std::make_tuple(current_frame, value, write_index);
+}
+
+// Fills |values| for a SetTarget event: an exponential approach from the
+// current |value| toward value1 (the target) with the event's time constant.
+// Once the approach is within convergence tolerance of the target, the rest
+// of the buffer is filled with the target directly.  On x86 the approach is
+// computed four samples at a time with SSE.
+//
+// Returns the updated (current_frame, value, write_index).
+std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessSetTarget(
+    const AutomationState& current_state,
+    float* values,
+    size_t current_frame,
+    float value,
+    unsigned write_index) {
+#if defined(ARCH_CPU_X86_FAMILY)
+  auto number_of_values = current_state.number_of_values;
+#endif
+  auto fill_to_frame = current_state.fill_to_frame;
+  auto time1 = current_state.time1;
+  auto value1 = current_state.value1;
+  auto sample_rate = current_state.sample_rate;
+  auto control_rate = current_state.control_rate;
+  auto fill_to_end_frame = current_state.fill_to_end_frame;
+  auto event = current_state.event;
+
+  // Exponential approach to target value with given time constant.
+  //
+  //   v(t) = v2 + (v1 - v2)*exp(-((t-t1)/tau))
+  //
+  float target = value1;
+  float time_constant = event->TimeConstant();
+  float discrete_time_constant =
+      static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
+          time_constant, control_rate));
+
+  // Set the starting value correctly.  This is only needed when the
+  // current time is "equal" to the start time of this event.  This is
+  // to get the sampling correct if the start time of this automation
+  // isn't on a frame boundary.  Otherwise, we can just continue from
+  // where we left off from the previous rendering quantum.
+  {
+    double ramp_start_frame = time1 * sample_rate;
+    // Condition is c - 1 < r <= c where c = currentFrame and r =
+    // rampStartFrame.  Compute it this way because currentFrame is
+    // unsigned and could be 0.
+    if (ramp_start_frame <= current_frame &&
+        current_frame < ramp_start_frame + 1) {
+      value = target +
+              (value - target) *
+                  exp(-(current_frame / sample_rate - time1) / time_constant);
+    } else {
+      // Otherwise, need to compute a new value because |value| is the
+      // last computed value of SetTarget.  Time has progressed by one
+      // frame, so we need to update the value for the new frame.
+      value += (target - value) * discrete_time_constant;
+    }
+  }
+
+  // If the value is close enough to the target, just fill in the data
+  // with the target value.
+  if (HasSetTargetConverged(value, target, discrete_time_constant)) {
+    for (; write_index < fill_to_frame; ++write_index)
+      values[write_index] = target;
+  } else {
+#if defined(ARCH_CPU_X86_FAMILY)
+    if (fill_to_frame > write_index) {
+      // Resolve recursion by expanding constants to achieve a 4-step
+      // loop unrolling.
+      //
+      //   v1 = v0 + (t - v0) * c
+      //   v2 = v1 + (t - v1) * c
+      //   v2 = v0 + (t - v0) * c + (t - (v0 + (t - v0) * c)) * c
+      //   v2 = v0 + (t - v0) * c + (t - v0) * c - (t - v0) * c * c
+      //   v2 = v0 + (t - v0) * c * (2 - c)
+      // Thus c0 = c, c1 = c*(2-c).  The same logic applies to c2 and c3.
+      const float c0 = discrete_time_constant;
+      const float c1 = c0 * (2 - c0);
+      const float c2 = c0 * ((c0 - 3) * c0 + 3);
+      const float c3 = c0 * (c0 * ((4 - c0) * c0 - 6) + 4);
+
+      float delta;
+      __m128 v_c = _mm_set_ps(c2, c1, c0, 0);
+      __m128 v_delta, v_value, v_result;
+
+      // Process 4 loop steps.
+      unsigned fill_to_frame_trunc =
+          write_index + ((fill_to_frame - write_index) / 4) * 4;
+      DCHECK_LE(fill_to_frame_trunc, number_of_values);
+
+      for (; write_index < fill_to_frame_trunc; write_index += 4) {
+        delta = target - value;
+        v_delta = _mm_set_ps1(delta);
+        v_value = _mm_set_ps1(value);
+
+        v_result = _mm_add_ps(v_value, _mm_mul_ps(v_delta, v_c));
+        _mm_storeu_ps(values + write_index, v_result);
+
+        // Update value for next iteration.
+        value += delta * c3;
+      }
+    }
+#endif
+    // Serially process remaining values
+    for (; write_index < fill_to_frame; ++write_index) {
+      values[write_index] = value;
+      value += (target - value) * discrete_time_constant;
+    }
+    // The previous loops may have updated |value| one extra time.
+    // Reset it to the last computed value.
+    if (write_index >= 1)
+      value = values[write_index - 1];
+    current_frame = fill_to_end_frame;
+  }
+
+  return std::make_tuple(current_frame, value, write_index);
+}
+
+// Fills |values| for a SetValueCurve event by linearly interpolating the
+// event's curve data over the event's duration.  The fill frame bookkeeping
+// is recomputed from the curve duration (rather than the next event time),
+// and any frames between the end of the curve and the next event are filled
+// with the curve's end value.  On x86 the interpolation is computed four
+// samples at a time with SSE.
+//
+// Returns the updated (current_frame, value, write_index).
+std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessSetValueCurve(
+    const AutomationState& current_state,
+    float* values,
+    size_t current_frame,
+    float value,
+    unsigned write_index) {
+  auto number_of_values = current_state.number_of_values;
+  auto fill_to_frame = current_state.fill_to_frame;
+  auto time1 = current_state.time1;
+  auto sample_rate = current_state.sample_rate;
+  auto start_frame = current_state.start_frame;
+  auto end_frame = current_state.end_frame;
+  auto fill_to_end_frame = current_state.fill_to_end_frame;
+  auto event = current_state.event;
+
+  const Vector<float> curve = event->Curve();
+  const float* curve_data = curve.data();
+  unsigned number_of_curve_points = curve.size();
+
+  float curve_end_value = event->CurveEndValue();
+
+  // Curve events have duration, so don't just use next event time.
+  double duration = event->Duration();
+  // How much to step the curve index for each frame.  This is basically
+  // the term (N - 1)/Td in the specification.
+  double curve_points_per_frame = event->CurvePointsPerSecond() / sample_rate;
+
+  if (!number_of_curve_points || duration <= 0 || sample_rate <= 0) {
+    // Error condition - simply propagate previous value.
+    current_frame = fill_to_end_frame;
+    for (; write_index < fill_to_frame; ++write_index)
+      values[write_index] = value;
+    return std::make_tuple(current_frame, value, write_index);
+  }
+
+  // Save old values and recalculate information based on the curve's
+  // duration instead of the next event time.
+  size_t next_event_fill_to_frame = fill_to_frame;
+
+  // fillToEndFrame = min(endFrame,
+  //                      ceil(sampleRate * (time1 + duration))),
+  // but compute this carefully in case sampleRate*(time1 + duration) is
+  // huge.  fillToEndFrame is an exclusive upper bound of the last frame
+  // to be computed, so ceil is used.
+  {
+    double curve_end_frame = ceil(sample_rate * (time1 + duration));
+    if (end_frame > curve_end_frame)
+      fill_to_end_frame = static_cast<size_t>(curve_end_frame);
+    else
+      fill_to_end_frame = end_frame;
+  }
+
+  // |fillToFrame| can be less than |startFrame| when the end of the
+  // setValueCurve automation has been reached, but the next automation
+  // has not yet started.  In this case, |fillToFrame| is clipped to
+  // |time1|+|duration| above, but |startFrame| will keep increasing
+  // (because the current time is increasing).
+  fill_to_frame =
+      (fill_to_end_frame < start_frame) ? 0 : fill_to_end_frame - start_frame;
+  fill_to_frame =
+      std::min(fill_to_frame, static_cast<size_t>(number_of_values));
+
+  // Index into the curve data using a floating-point value.
+  // We're scaling the number of curve points by the duration (see
+  // curvePointsPerFrame).
+  double curve_virtual_index = 0;
+  if (time1 < current_frame / sample_rate) {
+    // Index somewhere in the middle of the curve data.
+    // Don't use timeToSampleFrame() since we want the exact
+    // floating-point frame.
+    double frame_offset = current_frame - time1 * sample_rate;
+    curve_virtual_index = curve_points_per_frame * frame_offset;
+  }
+
+  // Set the default value in case fillToFrame is 0.
+  value = curve_end_value;
+
+  // Render the stretched curve data using linear interpolation.
+  // Oversampled curve data can be provided if sharp discontinuities are
+  // desired.
+  unsigned k = 0;
+#if defined(ARCH_CPU_X86_FAMILY)
+  if (fill_to_frame > write_index) {
+    const __m128 v_curve_virtual_index = _mm_set_ps1(curve_virtual_index);
+    const __m128 v_curve_points_per_frame = _mm_set_ps1(curve_points_per_frame);
+    const __m128 v_number_of_curve_points_m1 =
+        _mm_set_ps1(number_of_curve_points - 1);
+    const __m128 v_n1 = _mm_set_ps1(1.0f);
+    const __m128 v_n4 = _mm_set_ps1(4.0f);
+
+    __m128 v_k = _mm_set_ps(3, 2, 1, 0);
+    int a_curve_index0[4];
+    int a_curve_index1[4];
+
+    // Truncate loop steps to multiple of 4
+    unsigned truncated_steps = ((fill_to_frame - write_index) / 4) * 4;
+    unsigned fill_to_frame_trunc = write_index + truncated_steps;
+    DCHECK_LE(fill_to_frame_trunc, number_of_values);
+
+    for (; write_index < fill_to_frame_trunc; write_index += 4) {
+      // Compute current index this way to minimize round-off that would
+      // have occurred by incrementing the index by curvePointsPerFrame.
+      __m128 v_current_virtual_index = _mm_add_ps(
+          v_curve_virtual_index, _mm_mul_ps(v_k, v_curve_points_per_frame));
+      v_k = _mm_add_ps(v_k, v_n4);
+
+      // Clamp index to the last element of the array.
+      __m128i v_curve_index0 = _mm_cvttps_epi32(
+          _mm_min_ps(v_current_virtual_index, v_number_of_curve_points_m1));
+      __m128i v_curve_index1 =
+          _mm_cvttps_epi32(_mm_min_ps(_mm_add_ps(v_current_virtual_index, v_n1),
+                                      v_number_of_curve_points_m1));
+
+      // Linearly interpolate between the two nearest curve points.
+      // |delta| is clamped to 1 because currentVirtualIndex can exceed
+      // curveIndex0 by more than one.  This can happen when we reached
+      // the end of the curve but still need values to fill out the
+      // current rendering quantum.
+      _mm_storeu_si128((__m128i*)a_curve_index0, v_curve_index0);
+      _mm_storeu_si128((__m128i*)a_curve_index1, v_curve_index1);
+      __m128 v_c0 = _mm_set_ps(
+          curve_data[a_curve_index0[3]], curve_data[a_curve_index0[2]],
+          curve_data[a_curve_index0[1]], curve_data[a_curve_index0[0]]);
+      __m128 v_c1 = _mm_set_ps(
+          curve_data[a_curve_index1[3]], curve_data[a_curve_index1[2]],
+          curve_data[a_curve_index1[1]], curve_data[a_curve_index1[0]]);
+      __m128 v_delta = _mm_min_ps(
+          _mm_sub_ps(v_current_virtual_index, _mm_cvtepi32_ps(v_curve_index0)),
+          v_n1);
+
+      __m128 v_value =
+          _mm_add_ps(v_c0, _mm_mul_ps(_mm_sub_ps(v_c1, v_c0), v_delta));
+
+      _mm_storeu_ps(values + write_index, v_value);
+    }
+    // Pass along k to the serial loop.
+    k = truncated_steps;
+  }
+  if (write_index >= 1)
+    value = values[write_index - 1];
+#endif
+  for (; write_index < fill_to_frame; ++write_index, ++k) {
+    // Compute current index this way to minimize round-off that would
+    // have occurred by incrementing the index by curvePointsPerFrame.
+    double current_virtual_index =
+        curve_virtual_index + k * curve_points_per_frame;
+    unsigned curve_index0;
+
+    // Clamp index to the last element of the array.
+    if (current_virtual_index < number_of_curve_points) {
+      curve_index0 = static_cast<unsigned>(current_virtual_index);
+    } else {
+      curve_index0 = number_of_curve_points - 1;
+    }
+
+    unsigned curve_index1 =
+        std::min(curve_index0 + 1, number_of_curve_points - 1);
+
+    // Linearly interpolate between the two nearest curve points.
+    // |delta| is clamped to 1 because currentVirtualIndex can exceed
+    // curveIndex0 by more than one.  This can happen when we reached
+    // the end of the curve but still need values to fill out the
+    // current rendering quantum.
+    DCHECK_LT(curve_index0, number_of_curve_points);
+    DCHECK_LT(curve_index1, number_of_curve_points);
+    float c0 = curve_data[curve_index0];
+    float c1 = curve_data[curve_index1];
+    double delta = std::min(current_virtual_index - curve_index0, 1.0);
+
+    value = c0 + (c1 - c0) * delta;
+
+    values[write_index] = value;
+  }
+
+  // If there's any time left after the duration of this event and the
+  // start of the next, then just propagate the last value of the
+  // curveData.  Don't modify |value| unless there is time left.
+  if (write_index < next_event_fill_to_frame) {
+    value = curve_end_value;
+    for (; write_index < next_event_fill_to_frame; ++write_index)
+      values[write_index] = value;
+  }
+
+  // Re-adjust current time
+  current_frame += next_event_fill_to_frame;
+
+  return std::make_tuple(current_frame, value, write_index);
+}
+
+// Fills |values| for a CancelValues event: the parameter simply holds its
+// current value.  The held value is either the cancelled value cached on the
+// event, or the running |value|, nudged forward by one frame if the
+// preceding event was a SetTarget whose approach was still in progress.
+//
+// Returns the updated (current_frame, value, write_index).
+std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessCancelValues(
+    const AutomationState& current_state,
+    float* values,
+    size_t current_frame,
+    float value,
+    unsigned write_index) {
+  auto fill_to_frame = current_state.fill_to_frame;
+  auto time1 = current_state.time1;
+  auto sample_rate = current_state.sample_rate;
+  auto control_rate = current_state.control_rate;
+  auto fill_to_end_frame = current_state.fill_to_end_frame;
+  auto event = current_state.event;
+  auto event_index = current_state.event_index;
+
+  // If the previous event was a SetTarget or ExponentialRamp
+  // event, the current value is one sample behind.  Update
+  // the sample value by one sample, but only at the start of
+  // this CancelValues event.
+  if (event->HasDefaultCancelledValue()) {
+    value = event->Value();
+  } else {
+    // Condition is c - 1 < cancel_frame <= c, i.e. the cancel time falls
+    // within the current frame; only then is the one-sample update applied.
+    double cancel_frame = time1 * sample_rate;
+    if (event_index >= 1 && cancel_frame <= current_frame &&
+        current_frame < cancel_frame + 1) {
+      ParamEvent::Type last_event_type = events_[event_index - 1]->GetType();
+      if (last_event_type == ParamEvent::kSetTarget) {
+        float target = events_[event_index - 1]->Value();
+        float time_constant = events_[event_index - 1]->TimeConstant();
+        float discrete_time_constant = static_cast<float>(
+            AudioUtilities::DiscreteTimeConstantForSampleRate(time_constant,
+                                                              control_rate));
+        value += (target - value) * discrete_time_constant;
+      }
+    }
+  }
+
+  // Simply stay at the current value.
+  for (; write_index < fill_to_frame; ++write_index)
+    values[write_index] = value;
+
+  current_frame = fill_to_end_frame;
+
+  return std::make_tuple(current_frame, value, write_index);
+}
+
+// Writes |default_value| into values[write_index, end_frame) and returns the
+// index one past the last frame written (or |write_index| unchanged when
+// there is nothing to fill).
+unsigned AudioParamTimeline::FillWithDefault(float* values,
+                                             float default_value,
+                                             size_t end_frame,
+                                             unsigned write_index) {
+  size_t frame = write_index;
+
+  while (frame < end_frame) {
+    values[frame] = default_value;
+    ++frame;
+  }
+
+  return frame;
+}
+
+// Determines whether setting the parameter value directly at |current_frame|
+// would overlap a scheduled automation event.  Returns (true, index of the
+// overlapped event) on a conflict, or (false, 0) when the value can be set
+// freely.  Used to warn about .value setter calls that collide with
+// automations.
+std::tuple<bool, size_t> AudioParamTimeline::EventAtFrame(
+    size_t current_frame,
+    float sample_rate) const {
+
+  size_t number_of_events = events_.size();
+  ParamEvent* event = nullptr;
+  ParamEvent* next_event = nullptr;
+  size_t current_event_index = 0;
+
+  for (current_event_index = 0; current_event_index < number_of_events;
+       ++current_event_index) {
+    event = events_[current_event_index].get();
+    next_event = current_event_index < number_of_events - 1
+                     ? events_[current_event_index + 1].get()
+                     : nullptr;
+
+    // Exit when we find a current event
+    if (IsEventCurrent(event, next_event, current_frame, sample_rate)) {
+      break;
+    }
+  }
+
+  // No current event, so no conflict.
+  if (current_event_index >= number_of_events) {
+    return std::make_tuple(false, 0);
+  }
+
+  double current_time = current_frame / sample_rate;
+
+  // Determine if setting the value at this time would overlap some
+  // event.
+  if (next_event) {
+    // There's a following event.  If the current event has ended
+    // and the next event hasn't started, then there's no conflict.
+    ParamEvent::Type next_type = next_event->GetType();
+    switch (event->GetType()) {
+      case ParamEvent::kSetValue:
+      case ParamEvent::kLinearRampToValue:
+      case ParamEvent::kExponentialRampToValue:
+        // The current event is happening right now or is in the
+        // past and is followed by some automation that starts in
+        // the future (like SetValue, SetTarget, etc.).  Then
+        // there's no overlap.  Otherwise there is.
+        if (current_time < next_event->Time() &&
+            (next_type == ParamEvent::kSetValue ||
+             next_type == ParamEvent::kSetTarget ||
+             next_type == ParamEvent::kSetValueCurve)) {
+          return std::make_tuple(false, 0);
+        }
+        return std::make_tuple(true, current_event_index);
+        break;
+      default:
+        return std::make_tuple(true, current_event_index);
+    }
+  }
+
+  // No next event.
+  switch (event->GetType()) {
+    case ParamEvent::kSetValue:
+      // A bare SetValue never conflicts with a direct value set.
+      return std::make_tuple(false, 0);
+    case ParamEvent::kSetValueCurve:
+      // Conflicts while the curve is still playing out its duration.
+      if (current_time <= event->Time() + event->Duration()) {
+        return std::make_tuple(true, current_event_index);
+      }
+      break;
+    case ParamEvent::kSetTarget:
+      // A SetTarget runs forever once started, so any later set conflicts.
+      if (current_time >= event->Time()) {
+        return std::make_tuple(true, current_event_index);
+      }
+      break;
+    default:
+      break;
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+// TODO(crbug.com/764396): Remove this when fixed.
+// Logs a console warning that the AudioParam .value setter was called while
+// the event at |event_index| (and possibly the one after it) is active.
+// |param_name| identifies the parameter in the message.
+void AudioParamTimeline::WarnSetterOverlapsEvent(
+    String param_name,
+    size_t event_index,
+    BaseAudioContext& context) const {
+
+  DCHECK_LT(event_index, events_.size());
+
+  ParamEvent* event = events_[event_index].get();
+  size_t next_index = event_index + 1;
+  ParamEvent* next =
+      next_index < events_.size() ? events_[next_index].get() : nullptr;
+
+  // Describe the overlapped event, and the following one if it exists.
+  String message = EventToString(*event) +
+                   (next ? " to " + EventToString(*next) : String(""));
+
+  context.GetExecutionContext()->AddConsoleMessage(
+      ConsoleMessage::Create(kJSMessageSource, kWarningMessageLevel,
+                             param_name + ".value setter called at time " +
+                                 String::Number(context.currentTime(), 16) +
+                                 " overlaps event " + message));
+}
+
+// Drops every event at or after |first_event_to_remove| from the timeline,
+// keeping |new_events_| consistent with |events_|.
+void AudioParamTimeline::RemoveCancelledEvents(size_t first_event_to_remove) {
+  const size_t event_count = events_.size();
+
+  // Any event about to be erased must also disappear from |new_events_|.
+  if (new_events_.size() > 0) {
+    for (size_t index = first_event_to_remove; index < event_count; ++index)
+      new_events_.erase(events_[index].get());
+  }
+
+  // Now drop the cancelled tail of the event list itself.
+  events_.EraseAt(first_event_to_remove, event_count - first_event_to_remove);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h
new file mode 100644
index 00000000000..a37f8b05678
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_param_timeline.h
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_TIMELINE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_TIMELINE_H_
+
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+#include <tuple>
+
+namespace blink {
+
+class AudioParamTimeline {
+ DISALLOW_NEW();
+
+ public:
+ AudioParamTimeline() = default;
+
+ void SetValueAtTime(float value, double time, ExceptionState&);
+ void LinearRampToValueAtTime(float value,
+ double time,
+ float initial_value,
+ double call_time,
+ ExceptionState&);
+ void ExponentialRampToValueAtTime(float value,
+ double time,
+ float initial_value,
+ double call_time,
+ ExceptionState&);
+ void SetTargetAtTime(float target,
+ double time,
+ double time_constant,
+ ExceptionState&);
+ void SetValueCurveAtTime(const Vector<float>& curve,
+ double time,
+ double duration,
+ ExceptionState&);
+ void CancelScheduledValues(double start_time, ExceptionState&);
+ void CancelAndHoldAtTime(double cancel_time, ExceptionState&);
+
+ // hasValue is set to true if a valid timeline value is returned.
+ // otherwise defaultValue is returned.
+ float ValueForContextTime(AudioDestinationHandler&,
+ float default_value,
+ bool& has_value,
+ float min_value,
+ float max_value);
+
+ // Given the time range in frames, calculates parameter values into the values
+ // buffer and returns the last parameter value calculated for "values" or the
+ // defaultValue if none were calculated. controlRate is the rate (number per
+ // second) at which parameter values will be calculated. It should equal
+ // sampleRate for sample-accurate parameter changes, and otherwise will
+ // usually match the render quantum size such that the parameter value changes
+ // once per render quantum.
+ float ValuesForFrameRange(size_t start_frame,
+ size_t end_frame,
+ float default_value,
+ float* values,
+ unsigned number_of_values,
+ double sample_rate,
+ double control_rate,
+ float min_value,
+ float max_value);
+
+ // Returns true if the AudioParam timeline needs to run in this
+ // rendering quantum. This means some automation is already running
+ // or is scheduled to run in the current rendering quantuym.
+ bool HasValues(size_t current_frame, double sample_rate) const;
+
+ float SmoothedValue() { return smoothed_value_; }
+ void SetSmoothedValue(float v) { smoothed_value_ = v; }
+
+ private:
+ class ParamEvent {
+ public:
+ enum Type {
+ kSetValue,
+ kLinearRampToValue,
+ kExponentialRampToValue,
+ kSetTarget,
+ kSetValueCurve,
+ // For cancelValuesAndHold
+ kCancelValues,
+ // Special marker for the end of a |kSetValueCurve| event.
+ kSetValueCurveEnd,
+ kLastType
+ };
+
+ static std::unique_ptr<ParamEvent> CreateLinearRampEvent(
+ float value,
+ double time,
+ float initial_value,
+ double call_time);
+ static std::unique_ptr<ParamEvent> CreateExponentialRampEvent(
+ float value,
+ double time,
+ float initial_value,
+ double call_time);
+ static std::unique_ptr<ParamEvent> CreateSetValueEvent(float value,
+ double time);
+ static std::unique_ptr<ParamEvent>
+ CreateSetTargetEvent(float value, double time, double time_constant);
+ static std::unique_ptr<ParamEvent> CreateSetValueCurveEvent(
+ const Vector<float>& curve,
+ double time,
+ double duration);
+ static std::unique_ptr<ParamEvent> CreateSetValueCurveEndEvent(float value,
+ double time);
+ static std::unique_ptr<ParamEvent> CreateCancelValuesEvent(
+ double time,
+ std::unique_ptr<ParamEvent> saved_event);
+ // Needed for creating a saved event where we want to supply all
+ // the possible parameters because we're mostly copying an
+ // existing event.
+ static std::unique_ptr<ParamEvent> CreateGeneralEvent(
+ Type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time,
+ double time_constant,
+ double duration,
+ Vector<float>& curve,
+ double curve_points_per_second,
+ float curve_end_value,
+ std::unique_ptr<ParamEvent> saved_event);
+
+ static bool EventPreceeds(const std::unique_ptr<ParamEvent>& a,
+ const std::unique_ptr<ParamEvent>& b) {
+ return a->Time() < b->Time();
+ }
+
+ Type GetType() const { return type_; }
+ float Value() const { return value_; }
+ double Time() const { return time_; }
+ void SetTime(double new_time) { time_ = new_time; }
+ double TimeConstant() const { return time_constant_; }
+ double Duration() const { return duration_; }
+ const Vector<float>& Curve() const { return curve_; }
+ Vector<float>& Curve() { return curve_; }
+ float InitialValue() const { return initial_value_; }
+ double CallTime() const { return call_time_; }
+
+ double CurvePointsPerSecond() const { return curve_points_per_second_; }
+ float CurveEndValue() const { return curve_end_value_; }
+
+ // For CancelValues events. Not valid for any other event.
+ ParamEvent* SavedEvent() const;
+ bool HasDefaultCancelledValue() const;
+ void SetCancelledValue(float);
+
+ private:
+ // General event
+ ParamEvent(Type type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time,
+ double time_constant,
+ double duration,
+ Vector<float>& curve,
+ double curve_points_per_second,
+ float curve_end_value,
+ std::unique_ptr<ParamEvent> saved_event);
+
+ // Create simplest event needing just a value and time, like
+ // setValueAtTime.
+ ParamEvent(Type, float value, double time);
+
+ // Create a linear or exponential ramp that requires an initial
+  // value and time in case there is no actual event that precedes
+ // this event.
+ ParamEvent(Type,
+ float value,
+ double time,
+ float initial_value,
+ double call_time);
+
+ // Create an event needing a time constant (setTargetAtTime)
+ ParamEvent(Type, float value, double time, double time_constant);
+
+ // Create a setValueCurve event
+ ParamEvent(Type,
+ double time,
+ double duration,
+ const Vector<float>& curve,
+ double curve_points_per_second,
+ float curve_end_value);
+
+ // Create CancelValues event
+ ParamEvent(Type, double time, std::unique_ptr<ParamEvent> saved_event);
+
+ Type type_;
+
+ // The value for the event. The interpretation of this depends on
+ // the event type. Not used for SetValueCurve. For CancelValues,
+ // it is the end value to use when cancelling a LinearRampToValue
+ // or ExponentialRampToValue event.
+ float value_;
+
+ // The time for the event. The interpretation of this depends on
+ // the event type.
+ double time_;
+
+ // Initial value and time to use for linear and exponential ramps that don't
+ // have a preceding event.
+ float initial_value_;
+ double call_time_;
+
+ // Only used for SetTarget events
+ double time_constant_;
+
+ // The following items are only used for SetValueCurve events.
+ //
+ // The duration of the curve.
+ double duration_;
+ // The array of curve points.
+ Vector<float> curve_;
+ // The number of curve points per second. it is used to compute
+ // the curve index step when running the automation.
+ double curve_points_per_second_;
+ // The default value to use at the end of the curve. Normally
+  // it's the last entry in |curve_|, but cancelling a SetValueCurve
+ // will set this to a new value.
+ float curve_end_value_;
+
+ // For CancelValues. If CancelValues is in the middle of an event, this
+ // holds the event that is being cancelled, so that processing can
+ // continue as if the event still existed up until we reach the actual
+ // scheduled cancel time.
+ std::unique_ptr<ParamEvent> saved_event_;
+
+ // True if a default value has been assigned to the CancelValues event.
+ bool has_default_cancelled_value_;
+ };
+
+ // State of the timeline for the current event.
+ struct AutomationState {
+ // Parameters for the current automation request. Number of
+ // values to be computed for the automation request
+ const unsigned number_of_values;
+ // Start and end frames for this automation request
+ const size_t start_frame;
+ const size_t end_frame;
+
+ // Sample rate and control rate for this request
+ const double sample_rate;
+ const double control_rate;
+
+ // Parameters needed for processing the current event.
+ const size_t fill_to_frame;
+ const size_t fill_to_end_frame;
+
+ // Value and time for the current event
+ const float value1;
+ const double time1;
+
+ // Value and time for the next event, if any.
+ const float value2;
+ const double time2;
+
+  // The current event, and its index in the event vector.
+ const ParamEvent* event;
+ const int event_index;
+ };
+
+ void InsertEvent(std::unique_ptr<ParamEvent>, ExceptionState&);
+ float ValuesForFrameRangeImpl(size_t start_frame,
+ size_t end_frame,
+ float default_value,
+ float* values,
+ unsigned number_of_values,
+ double sample_rate,
+ double control_rate);
+
+ // Produce a nice string describing the event in human-readable form.
+ String EventToString(const ParamEvent&) const;
+
+  // Automation functions that compute the value of the specified
+ // automation at the specified time.
+ float LinearRampAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ double time2);
+ float ExponentialRampAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ double time2);
+ float TargetValueAtTime(double t,
+ float value1,
+ double time1,
+ float value2,
+ float time_constant);
+ float ValueCurveAtTime(double t,
+ double time1,
+ double duration,
+ const float* curve_data,
+ unsigned curve_length);
+
+ // Handles the special case where the first event in the timeline
+ // starts after |startFrame|. These initial values are filled using
+ // |defaultValue|. The updated |currentFrame| and |writeIndex| is
+ // returned.
+ std::tuple<size_t, unsigned> HandleFirstEvent(float* values,
+ float default_value,
+ unsigned number_of_values,
+ size_t start_frame,
+ size_t end_frame,
+ double sample_rate,
+ size_t current_frame,
+ unsigned write_index);
+
+ // Return true if |currentEvent| starts after |currentFrame|, but
+ // also takes into account the |nextEvent| if any.
+ bool IsEventCurrent(const ParamEvent* current_event,
+ const ParamEvent* next_event,
+ size_t current_frame,
+ double sample_rate) const;
+
+ // Clamp times to current time, if needed for any new events. Note,
+ // this method can mutate |events_|, so do call this only in safe
+ // places.
+ void ClampNewEventsToCurrentTime(double current_time);
+
+ // Handle the case where the last event in the timeline is in the
+ // past. Returns false if any event is not in the past. Otherwise,
+ // return true and also fill in |values| with |defaultValue|.
+ // |defaultValue| may be updated with a new value.
+ bool HandleAllEventsInThePast(double current_time,
+ double sample_rate,
+ float& default_value,
+ unsigned number_of_values,
+ float* values);
+
+ // Handle processing of CancelValue event. If cancellation happens, value2,
+ // time2, and nextEventType will be updated with the new value due to
+  // cancellation.
+ std::tuple<float, double, ParamEvent::Type> HandleCancelValues(
+ const ParamEvent* current_event,
+ ParamEvent* next_event,
+ float value2,
+ double time2);
+
+ // Process a SetTarget event and the next event is a
+ // LinearRampToValue or ExponentialRampToValue event. This requires
+ // special handling because the ramp should start at whatever value
+ // the SetTarget event has reached at this time, instead of using
+ // the value of the SetTarget event.
+ void ProcessSetTargetFollowedByRamp(int event_index,
+ ParamEvent*& current_event,
+ ParamEvent::Type next_event_type,
+ size_t current_frame,
+ double sample_rate,
+ double control_rate,
+ float& value);
+
+ // Handle processing of linearRampEvent, writing the appropriate
+ // values to |values|. Returns the updated |currentFrame|, last
+ // computed |value|, and the updated |writeIndex|.
+ std::tuple<size_t, float, unsigned> ProcessLinearRamp(
+ const AutomationState& current_state,
+ float* values,
+ size_t current_frame,
+ float value,
+ unsigned write_index);
+
+ // Handle processing of exponentialRampEvent, writing the appropriate
+ // values to |values|. Returns the updated |currentFrame|, last
+ // computed |value|, and the updated |writeIndex|.
+ std::tuple<size_t, float, unsigned> ProcessExponentialRamp(
+ const AutomationState& current_state,
+ float* values,
+ size_t current_frame,
+ float value,
+ unsigned write_index);
+
+ // Handle processing of SetTargetEvent, writing the appropriate
+ // values to |values|. Returns the updated |currentFrame|, last
+ // computed |value|, and the updated |writeIndex|.
+ std::tuple<size_t, float, unsigned> ProcessSetTarget(
+ const AutomationState& current_state,
+ float* values,
+ size_t current_frame,
+ float value,
+ unsigned write_index);
+
+ // Handle processing of SetValueCurveEvent, writing the appropriate
+ // values to |values|. Returns the updated |currentFrame|, last
+ // computed |value|, and the updated |writeIndex|.
+ std::tuple<size_t, float, unsigned> ProcessSetValueCurve(
+ const AutomationState& current_state,
+ float* values,
+ size_t current_frame,
+ float value,
+ unsigned write_index);
+
+ // Handle processing of CancelValuesEvent, writing the appropriate
+ // values to |values|. Returns the updated |currentFrame|, last
+ // computed |value|, and the updated |writeIndex|.
+ std::tuple<size_t, float, unsigned> ProcessCancelValues(
+ const AutomationState& current_state,
+ float* values,
+ size_t current_frame,
+ float value,
+ unsigned write_index);
+
+ // Fill the output vector |values| with the value |defaultValue|,
+ // starting at |writeIndex| and continuing up to |endFrame|
+ // (exclusive). |writeIndex| is updated with the new index.
+ unsigned FillWithDefault(float* values,
+ float default_value,
+ size_t end_frame,
+ unsigned write_index);
+
+ // TODO(crbug.com/764396): Remove these two methods when the bug is fixed.
+
+ // |EventAtFrame| finds the current event that would run at the specified
+ // |frame|. The first return value is true if a setValueAtTime call would
+ // overlap some ongoing event. The second return value is the index of the
+ // current event. The second value must be ignored if the first value is
+ // false.
+ std::tuple<bool, size_t> EventAtFrame(size_t frame, float sample_rate) const;
+
+ // Prints a console warning that a call to the AudioParam value setter
+ // overlaps the event at |event_index|. |param_name| is the name of the
+ // AudioParam where the where this is happening.
+ void WarnSetterOverlapsEvent(String param_name,
+ size_t event_index,
+ BaseAudioContext&) const;
+
+ // When cancelling events, remove the items from |events_| starting
+ // at the given index. Update |new_events_| too.
+ void RemoveCancelledEvents(size_t first_event_to_remove);
+
+ // Vector of all automation events for the AudioParam. Access must
+  // be locked via |events_lock_|.
+ Vector<std::unique_ptr<ParamEvent>> events_;
+
+ // Vector of raw pointers to the actual ParamEvent that was
+ // inserted. As new events are added, |new_events_| is updated with
+  // the new event. When the timeline is processed, these events are
+ // clamped to current time by |ClampNewEventsToCurrentTime|. Access
+ // must be locked via |events_lock_|. Must be maintained together
+ // with |events_|.
+ HashSet<ParamEvent*> new_events_;
+
+ mutable Mutex events_lock_;
+
+ // Smoothing (de-zippering)
+ float smoothed_value_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PARAM_TIMELINE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc
new file mode 100644
index 00000000000..3c3dbedc9b7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_processing_event.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_processing_event_init.h"
+
+namespace blink {
+
+AudioProcessingEvent* AudioProcessingEvent::Create() {
+ return new AudioProcessingEvent;
+}
+
+AudioProcessingEvent* AudioProcessingEvent::Create(AudioBuffer* input_buffer,
+ AudioBuffer* output_buffer,
+ double playback_time) {
+ return new AudioProcessingEvent(input_buffer, output_buffer, playback_time);
+}
+
+AudioProcessingEvent* AudioProcessingEvent::Create(
+ const AtomicString& type,
+ const AudioProcessingEventInit& initializer) {
+ return new AudioProcessingEvent(type, initializer);
+}
+
+AudioProcessingEvent::AudioProcessingEvent() = default;
+
+AudioProcessingEvent::AudioProcessingEvent(AudioBuffer* input_buffer,
+ AudioBuffer* output_buffer,
+ double playback_time)
+ : Event(EventTypeNames::audioprocess, Bubbles::kYes, Cancelable::kNo),
+ input_buffer_(input_buffer),
+ output_buffer_(output_buffer),
+ playback_time_(playback_time) {}
+
+AudioProcessingEvent::AudioProcessingEvent(
+ const AtomicString& type,
+ const AudioProcessingEventInit& initializer)
+ : Event(type, initializer) {
+ input_buffer_ = initializer.inputBuffer();
+ output_buffer_ = initializer.outputBuffer();
+ playback_time_ = initializer.playbackTime();
+}
+
+AudioProcessingEvent::~AudioProcessingEvent() = default;
+
+const AtomicString& AudioProcessingEvent::InterfaceName() const {
+ return EventNames::AudioProcessingEvent;
+}
+
+void AudioProcessingEvent::Trace(blink::Visitor* visitor) {
+ visitor->Trace(input_buffer_);
+ visitor->Trace(output_buffer_);
+ Event::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h
new file mode 100644
index 00000000000..da1b30a0b69
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PROCESSING_EVENT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PROCESSING_EVENT_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/event_modules.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_processing_event_init.h"
+
+namespace blink {
+
+class AudioBuffer;
+class AudioProcessingEventInit;
+
+class AudioProcessingEvent final : public Event {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioProcessingEvent* Create();
+ static AudioProcessingEvent* Create(AudioBuffer* input_buffer,
+ AudioBuffer* output_buffer,
+ double playback_time);
+
+ static AudioProcessingEvent* Create(const AtomicString& type,
+ const AudioProcessingEventInit&);
+
+ ~AudioProcessingEvent() override;
+
+ AudioBuffer* inputBuffer() { return input_buffer_.Get(); }
+ AudioBuffer* outputBuffer() { return output_buffer_.Get(); }
+ double playbackTime() const { return playback_time_; }
+
+ const AtomicString& InterfaceName() const override;
+
+ virtual void Trace(blink::Visitor*);
+
+ private:
+ AudioProcessingEvent();
+ AudioProcessingEvent(AudioBuffer* input_buffer,
+ AudioBuffer* output_buffer,
+ double playback_time);
+ AudioProcessingEvent(const AtomicString& type,
+ const AudioProcessingEventInit&);
+
+ Member<AudioBuffer> input_buffer_;
+ Member<AudioBuffer> output_buffer_;
+ double playback_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_PROCESSING_EVENT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.idl
new file mode 100644
index 00000000000..947990e928f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event.idl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#audioprocessingevent
+[
+ Constructor(DOMString type, AudioProcessingEventInit eventInitDict)
+]
+interface AudioProcessingEvent : Event {
+ readonly attribute double playbackTime;
+ readonly attribute AudioBuffer inputBuffer;
+ readonly attribute AudioBuffer outputBuffer;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event_init.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event_init.idl
new file mode 100644
index 00000000000..32e12a7b50f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_processing_event_init.idl
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-audioprocessingeventinit
+dictionary AudioProcessingEventInit : EventInit {
+ required double playbackTime;
+ required AudioBuffer inputBuffer;
+ required AudioBuffer outputBuffer;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc
new file mode 100644
index 00000000000..f42ff28bbda
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.cc
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h"
+
+#include <algorithm>
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/event_modules.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+const double AudioScheduledSourceHandler::kUnknownTime = -1;
+
+AudioScheduledSourceHandler::AudioScheduledSourceHandler(NodeType node_type,
+ AudioNode& node,
+ float sample_rate)
+ : AudioHandler(node_type, node, sample_rate),
+ start_time_(0),
+ end_time_(kUnknownTime),
+ playback_state_(UNSCHEDULED_STATE) {
+ if (Context()->GetExecutionContext()) {
+ task_runner_ = Context()->GetExecutionContext()->GetTaskRunner(
+ TaskType::kMediaElementEvent);
+ }
+}
+
+void AudioScheduledSourceHandler::UpdateSchedulingInfo(
+ size_t quantum_frame_size,
+ AudioBus* output_bus,
+ size_t& quantum_frame_offset,
+ size_t& non_silent_frames_to_process,
+ double& start_frame_offset) {
+ DCHECK(output_bus);
+ if (!output_bus)
+ return;
+
+ DCHECK_EQ(quantum_frame_size,
+ static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
+ if (quantum_frame_size != AudioUtilities::kRenderQuantumFrames)
+ return;
+
+ double sample_rate = Context()->sampleRate();
+
+ // quantumStartFrame : Start frame of the current time quantum.
+ // quantumEndFrame : End frame of the current time quantum.
+ // startFrame : Start frame for this source.
+ // endFrame : End frame for this source.
+ size_t quantum_start_frame = Context()->CurrentSampleFrame();
+ size_t quantum_end_frame = quantum_start_frame + quantum_frame_size;
+ size_t start_frame =
+ AudioUtilities::TimeToSampleFrame(start_time_, sample_rate);
+ size_t end_frame =
+ end_time_ == kUnknownTime
+ ? 0
+ : AudioUtilities::TimeToSampleFrame(end_time_, sample_rate);
+
+ // If we know the end time and it's already passed, then don't bother doing
+ // any more rendering this cycle.
+ if (end_time_ != kUnknownTime && end_frame <= quantum_start_frame)
+ Finish();
+
+ PlaybackState state = GetPlaybackState();
+
+ if (state == UNSCHEDULED_STATE || state == FINISHED_STATE ||
+ start_frame >= quantum_end_frame) {
+ // Output silence.
+ output_bus->Zero();
+ non_silent_frames_to_process = 0;
+ return;
+ }
+
+ // Check if it's time to start playing.
+ if (state == SCHEDULED_STATE) {
+ // Increment the active source count only if we're transitioning from
+ // SCHEDULED_STATE to PLAYING_STATE.
+ SetPlaybackState(PLAYING_STATE);
+ // Determine the offset of the true start time from the starting frame.
+ start_frame_offset = start_time_ * sample_rate - start_frame;
+ } else {
+ start_frame_offset = 0;
+ }
+
+ quantum_frame_offset =
+ start_frame > quantum_start_frame ? start_frame - quantum_start_frame : 0;
+ quantum_frame_offset = std::min(quantum_frame_offset,
+ quantum_frame_size); // clamp to valid range
+ non_silent_frames_to_process = quantum_frame_size - quantum_frame_offset;
+
+ if (!non_silent_frames_to_process) {
+ // Output silence.
+ output_bus->Zero();
+ return;
+ }
+
+ // Handle silence before we start playing.
+ // Zero any initial frames representing silence leading up to a rendering
+ // start time in the middle of the quantum.
+ if (quantum_frame_offset) {
+ for (unsigned i = 0; i < output_bus->NumberOfChannels(); ++i)
+ memset(output_bus->Channel(i)->MutableData(), 0,
+ sizeof(float) * quantum_frame_offset);
+ }
+
+ // Handle silence after we're done playing.
+ // If the end time is somewhere in the middle of this time quantum, then zero
+ // out the frames from the end time to the very end of the quantum.
+ if (end_time_ != kUnknownTime && end_frame >= quantum_start_frame &&
+ end_frame < quantum_end_frame) {
+ size_t zero_start_frame = end_frame - quantum_start_frame;
+ size_t frames_to_zero = quantum_frame_size - zero_start_frame;
+
+ bool is_safe = zero_start_frame < quantum_frame_size &&
+ frames_to_zero <= quantum_frame_size &&
+ zero_start_frame + frames_to_zero <= quantum_frame_size;
+ DCHECK(is_safe);
+
+ if (is_safe) {
+ if (frames_to_zero > non_silent_frames_to_process)
+ non_silent_frames_to_process = 0;
+ else
+ non_silent_frames_to_process -= frames_to_zero;
+
+ for (unsigned i = 0; i < output_bus->NumberOfChannels(); ++i)
+ memset(output_bus->Channel(i)->MutableData() + zero_start_frame, 0,
+ sizeof(float) * frames_to_zero);
+ }
+
+ Finish();
+ }
+
+ return;
+}
+
+void AudioScheduledSourceHandler::Start(double when,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ Context()->MaybeRecordStartAttempt();
+
+ if (GetPlaybackState() != UNSCHEDULED_STATE) {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "cannot call start more than once.");
+ return;
+ }
+
+ if (when < 0) {
+ exception_state.ThrowRangeError(
+ ExceptionMessages::IndexExceedsMinimumBound("start time", when, 0.0));
+ return;
+ }
+
+ // The node is started. Add a reference to keep us alive so that audio will
+ // eventually get played even if Javascript should drop all references to this
+ // node. The reference will get dropped when the source has finished playing.
+ Context()->NotifySourceNodeStartedProcessing(GetNode());
+
+ // This synchronizes with process(). updateSchedulingInfo will read some of
+ // the variables being set here.
+ MutexLocker process_locker(process_lock_);
+
+ // If |when| < currentTime, the source must start now according to the spec.
+ // So just set startTime to currentTime in this case to start the source now.
+ start_time_ = std::max(when, Context()->currentTime());
+
+ SetPlaybackState(SCHEDULED_STATE);
+}
+
+void AudioScheduledSourceHandler::Stop(double when,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (GetPlaybackState() == UNSCHEDULED_STATE) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError, "cannot call stop without calling start first.");
+ return;
+ }
+
+ if (when < 0) {
+ exception_state.ThrowRangeError(
+ ExceptionMessages::IndexExceedsMinimumBound("stop time", when, 0.0));
+ return;
+ }
+
+ // This synchronizes with process()
+ MutexLocker process_locker(process_lock_);
+
+ // stop() can be called more than once, with the last call to stop taking
+ // effect, unless the source has already stopped due to earlier calls to stop.
+ // No exceptions are thrown in any case.
+ when = std::max(0.0, when);
+ end_time_ = when;
+}
+
+void AudioScheduledSourceHandler::FinishWithoutOnEnded() {
+ if (GetPlaybackState() != FINISHED_STATE) {
+ // Let the context dereference this AudioNode.
+ Context()->NotifySourceNodeFinishedProcessing(this);
+ SetPlaybackState(FINISHED_STATE);
+ }
+}
+
+void AudioScheduledSourceHandler::Finish() {
+ FinishWithoutOnEnded();
+
+ PostCrossThreadTask(*task_runner_, FROM_HERE,
+ CrossThreadBind(&AudioScheduledSourceHandler::NotifyEnded,
+ WrapRefCounted(this)));
+}
+
+void AudioScheduledSourceHandler::NotifyEnded() {
+ DCHECK(IsMainThread());
+ if (!Context() || !Context()->GetExecutionContext())
+ return;
+ if (GetNode())
+ GetNode()->DispatchEvent(Event::Create(EventTypeNames::ended));
+}
+
+// ----------------------------------------------------------------
+
+AudioScheduledSourceNode::AudioScheduledSourceNode(BaseAudioContext& context)
+ : AudioNode(context) {}
+
+AudioScheduledSourceHandler&
+AudioScheduledSourceNode::GetAudioScheduledSourceHandler() const {
+ return static_cast<AudioScheduledSourceHandler&>(Handler());
+}
+
+void AudioScheduledSourceNode::start(ExceptionState& exception_state) {
+ start(0, exception_state);
+}
+
+void AudioScheduledSourceNode::start(double when,
+ ExceptionState& exception_state) {
+ GetAudioScheduledSourceHandler().Start(when, exception_state);
+}
+
+void AudioScheduledSourceNode::stop(ExceptionState& exception_state) {
+ stop(0, exception_state);
+}
+
+void AudioScheduledSourceNode::stop(double when,
+ ExceptionState& exception_state) {
+ GetAudioScheduledSourceHandler().Stop(when, exception_state);
+}
+
+EventListener* AudioScheduledSourceNode::onended() {
+ return GetAttributeEventListener(EventTypeNames::ended);
+}
+
+void AudioScheduledSourceNode::setOnended(EventListener* listener) {
+ SetAttributeEventListener(EventTypeNames::ended, listener);
+}
+
+bool AudioScheduledSourceNode::HasPendingActivity() const {
+ // To avoid the leak, a node should be collected regardless of its
+ // playback state if the context is closed.
+ if (context()->IsContextClosed())
+ return false;
+
+ // If a node is scheduled or playing, do not collect the node prematurely
+ // even its reference is out of scope. Then fire onended event if assigned.
+ return GetAudioScheduledSourceHandler().IsPlayingOrScheduled();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h
new file mode 100644
index 00000000000..e1fa3e4c686
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SCHEDULED_SOURCE_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SCHEDULED_SOURCE_NODE_H_
+
+#include "third_party/blink/renderer/bindings/core/v8/active_script_wrappable.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class AudioBus;
+
class AudioScheduledSourceHandler : public AudioHandler {
 public:
  // These are the possible states an AudioScheduledSourceNode can be in:
  //
  // UNSCHEDULED_STATE - Initial playback state. Created, but not yet scheduled.
  // SCHEDULED_STATE - Scheduled to play (via start()), but not yet playing.
  // PLAYING_STATE - Generating sound.
  // FINISHED_STATE - Finished generating sound.
  //
  // The state can only transition to the next state, except for the
  // FINISHED_STATE which can never be changed.
  enum PlaybackState {
    // These must be defined with the same names and values as in the .idl file.
    UNSCHEDULED_STATE = 0,
    SCHEDULED_STATE = 1,
    PLAYING_STATE = 2,
    FINISHED_STATE = 3
  };

  AudioScheduledSourceHandler(NodeType, AudioNode&, float sample_rate);

  // Scheduling: |when| is in seconds on the context's timeline; 0 or a past
  // time means "now". Errors are reported through the ExceptionState.
  void Start(double when, ExceptionState&);
  void Stop(double when, ExceptionState&);

  // AudioNode: source nodes contribute no tail or latency.
  double TailTime() const override { return 0; }
  double LatencyTime() const override { return 0; }

  // Acquire/release pair below makes |playback_state_| safe to read on the
  // audio thread while the main thread writes it.
  PlaybackState GetPlaybackState() const {
    return static_cast<PlaybackState>(AcquireLoad(&playback_state_));
  }

  void SetPlaybackState(PlaybackState new_state) {
    ReleaseStore(&playback_state_, new_state);
  }

  // True while the node is in SCHEDULED_STATE or PLAYING_STATE; used by the
  // node's HasPendingActivity() to keep it alive.
  bool IsPlayingOrScheduled() const {
    PlaybackState state = GetPlaybackState();
    return state == PLAYING_STATE || state == SCHEDULED_STATE;
  }

  bool HasFinished() const { return GetPlaybackState() == FINISHED_STATE; }

  // Source nodes don't have tail or latency times so no tail
  // processing needed.
  bool RequiresTailProcessing() const final { return false; }

 protected:
  // Get frame information for the current time quantum.
  // We handle the transition into PLAYING_STATE and FINISHED_STATE here,
  // zeroing out portions of the outputBus which are outside the range of
  // startFrame and endFrame.
  //
  // Each frame time is relative to the context's currentSampleFrame().
  // quantum_frame_offset : Offset frame in this time quantum to start
  //                        rendering.
  // non_silent_frames_to_process : Number of frames rendering non-silence
  //                        (will be <= quantum_frame_size).
  // start_frame_offset   : The fractional frame offset from
  //                        quantum_frame_offset and the actual starting time
  //                        of the source. This is non-zero only when
  //                        transitioning from the SCHEDULED_STATE to the
  //                        PLAYING_STATE.
  void UpdateSchedulingInfo(size_t quantum_frame_size,
                            AudioBus* output_bus,
                            size_t& quantum_frame_offset,
                            size_t& non_silent_frames_to_process,
                            double& start_frame_offset);

  // Called when we have no more sound to play or the stop() time has been
  // reached. No onended event is fired.
  virtual void FinishWithoutOnEnded();

  // Like FinishWithoutOnEnded(), but an onended event (if specified) is fired.
  virtual void Finish();

  void NotifyEnded();

  // This synchronizes with process() and any other method that needs to be
  // synchronized like setBuffer for AudioBufferSource.
  mutable Mutex process_lock_;

  // |start_time_| is the time to start playing based on the context's timeline
  // (0 or a time less than the context's current time means "now").
  double start_time_;  // in seconds

  // |end_time_| is the time to stop playing based on the context's timeline (0
  // or a time less than the context's current time means "now"). If it hasn't
  // been set explicitly, then the sound will not stop playing (if looping) or
  // will stop when the end of the AudioBuffer has been reached.
  double end_time_;  // in seconds

  static const double kUnknownTime;

 private:
  // This is accessed by both the main thread and audio thread. Use the setter
  // and getter above to protect the access to this.
  int playback_state_;

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
+
// Main-thread wrapper for a source node that can be scheduled with start()
// and stop(). ActiveScriptWrappable keeps the wrapper alive while
// HasPendingActivity() returns true.
class AudioScheduledSourceNode
    : public AudioNode,
      public ActiveScriptWrappable<AudioScheduledSourceNode> {
  USING_GARBAGE_COLLECTED_MIXIN(AudioScheduledSourceNode);
  DEFINE_WRAPPERTYPEINFO();

 public:
  // Web-exposed scheduling; |when| is in seconds on the context's timeline.
  // The no-argument overloads schedule at time 0 ("now").
  void start(ExceptionState&);
  void start(double when, ExceptionState&);
  void stop(ExceptionState&);
  void stop(double when, ExceptionState&);

  // "onended" event-handler attribute.
  EventListener* onended();
  void setOnended(EventListener*);

  // ScriptWrappable: keeps the node alive while scheduled/playing and the
  // context is still open.
  bool HasPendingActivity() const final;

  virtual void Trace(blink::Visitor* visitor) { AudioNode::Trace(visitor); }

 protected:
  explicit AudioScheduledSourceNode(BaseAudioContext&);
  // Downcasts Handler() to the scheduled-source handler type.
  AudioScheduledSourceHandler& GetAudioScheduledSourceHandler() const;
};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SCHEDULED_SOURCE_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.idl
new file mode 100644
index 00000000000..07466238646
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.idl
@@ -0,0 +1,13 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#AudioScheduledSourceNode
[
    ActiveScriptWrappable
]
interface AudioScheduledSourceNode : AudioNode {
    // |when| is in seconds on the context's timeline; omitting it means 0
    // ("now").
    [RaisesException] void start(optional double when);
    [RaisesException] void stop(optional double when);
    // Fired ("ended" event) when the source finishes producing sound.
    attribute EventHandler onended;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.cc
new file mode 100644
index 00000000000..4146dd2e74f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <algorithm>
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_summing_junction.h"
+
+namespace blink {
+
// A junction starts with a clean rendering state; outputs are registered
// later and flagged via ChangedOutputs().
AudioSummingJunction::AudioSummingJunction(DeferredTaskHandler& handler)
    : deferred_task_handler_(&handler), rendering_state_need_updating_(false) {}
+
// Unregister from the task handler so a destroyed junction is never visited
// by a pending dirty-junction update.
AudioSummingJunction::~AudioSummingJunction() {
  GetDeferredTaskHandler().RemoveMarkedSummingJunction(this);
}
+
+void AudioSummingJunction::ChangedOutputs() {
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ if (!rendering_state_need_updating_) {
+ GetDeferredTaskHandler().MarkSummingJunctionDirty(this);
+ rendering_state_need_updating_ = true;
+ }
+}
+
+void AudioSummingJunction::UpdateRenderingState() {
+ DCHECK(GetDeferredTaskHandler().IsAudioThread());
+ DCHECK(GetDeferredTaskHandler().IsGraphOwner());
+ if (rendering_state_need_updating_) {
+ // Copy from m_outputs to m_renderingOutputs.
+ rendering_outputs_.resize(outputs_.size());
+ unsigned j = 0;
+ for (AudioNodeOutput* output : outputs_) {
+ rendering_outputs_[j++] = output;
+ output->UpdateRenderingState();
+ }
+
+ DidUpdate();
+
+ rendering_state_need_updating_ = false;
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.h
new file mode 100644
index 00000000000..628c1461ffd
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_summing_junction.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SUMMING_JUNCTION_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SUMMING_JUNCTION_H_
+
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class AudioNodeOutput;
+class DeferredTaskHandler;
+
+// An AudioSummingJunction represents a point where zero, one, or more
+// AudioNodeOutputs connect.
+
class AudioSummingJunction {
 public:
  virtual ~AudioSummingJunction();

  // Can be called from any thread.
  DeferredTaskHandler& GetDeferredTaskHandler() const {
    return *deferred_task_handler_;
  }

  // This must be called whenever we modify |outputs_|.
  void ChangedOutputs();

  // This copies |outputs_| to |rendering_outputs_|. Please see comments for
  // these lists below. This must be called when we own the context's graph
  // lock in the audio thread at the very start or end of the render quantum.
  void UpdateRenderingState();

  // Rendering code accesses its version of the current connections here.
  unsigned NumberOfRenderingConnections() const {
    return rendering_outputs_.size();
  }
  AudioNodeOutput* RenderingOutput(unsigned i) { return rendering_outputs_[i]; }
  bool IsConnected() const { return NumberOfRenderingConnections() > 0; }

  // Hook invoked from UpdateRenderingState() after the snapshot is taken.
  virtual void DidUpdate() = 0;

 protected:
  explicit AudioSummingJunction(DeferredTaskHandler&);

  scoped_refptr<DeferredTaskHandler> deferred_task_handler_;

  // |outputs_| contains the AudioNodeOutputs representing current connections
  // which are not disabled. The rendering code should never use this
  // directly, but instead uses |rendering_outputs_|.
  // These raw pointers are safe. Owner AudioNodes of these AudioNodeOutputs
  // manage their lifetime, and AudioNode::dispose() disconnects all of
  // connections.
  HashSet<AudioNodeOutput*> outputs_;

  // |rendering_outputs_| is a copy of |outputs_| which will never be modified
  // during the graph rendering on the audio thread. This is the list which
  // is used by the rendering code.
  // Whenever |outputs_| is modified, the context is told so it can later
  // update |rendering_outputs_| from |outputs_| at a safe time. Most of the
  // time, |rendering_outputs_| is identical to |outputs_|.
  // These raw pointers are safe. Owner of this AudioSummingJunction has
  // strong references to owners of these AudioNodeOutput.
  Vector<AudioNodeOutput*> rendering_outputs_;

  // |rendering_state_need_updating_| keeps track if |outputs_| is modified.
  bool rendering_state_need_updating_;
};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_SUMMING_JUNCTION_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_timestamp.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_timestamp.idl
new file mode 100644
index 00000000000..6a417f7945e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_timestamp.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-audiotimestamp
dictionary AudioTimestamp {
    // Time on the BaseAudioContext's currentTime timeline, in seconds.
    double contextTime;
    // The same moment expressed on the Performance timeline.
    DOMHighResTimeStamp performanceTime;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.cc
new file mode 100644
index 00000000000..38678861a93
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.cc
@@ -0,0 +1,98 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+
+#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/frame/use_counter.h"
+#include "third_party/blink/renderer/core/workers/worker_clients.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h"
+
+namespace blink {
+
// Factory for the context's AudioWorklet.
// NOTE(review): the header comment claims this returns |nullptr| when the
// AudioWorklet runtime flag is disabled, but no such check happens here — a
// non-null instance is always returned. Confirm where the flag is enforced.
AudioWorklet* AudioWorklet::Create(BaseAudioContext* context) {
  return new AudioWorklet(context);
}
+
// The worklet is parented to the document of the context's execution context
// and keeps a strong reference to the context itself.
AudioWorklet::AudioWorklet(BaseAudioContext* context)
    : Worklet(ToDocument(context->GetExecutionContext())), context_(context) {}
+
+void AudioWorklet::CreateProcessor(
+ AudioWorkletHandler* handler,
+ MessagePortChannel message_port_channel,
+ scoped_refptr<SerializedScriptValue> node_options) {
+ DCHECK(IsMainThread());
+ DCHECK(GetMessagingProxy());
+ GetMessagingProxy()->CreateProcessor(handler,
+ std::move(message_port_channel),
+ std::move(node_options));
+}
+
+void AudioWorklet::NotifyGlobalScopeIsUpdated() {
+ DCHECK(IsMainThread());
+
+ if (!worklet_started_) {
+ context_->NotifyWorkletIsReady();
+ worklet_started_ = true;
+ }
+}
+
// Returns the owning context. Main thread only.
BaseAudioContext* AudioWorklet::GetBaseAudioContext() const {
  DCHECK(IsMainThread());
  return context_.Get();
}
+
+const Vector<CrossThreadAudioParamInfo>
+ AudioWorklet::GetParamInfoListForProcessor(
+ const String& name) {
+ DCHECK(IsMainThread());
+ DCHECK(GetMessagingProxy());
+ return GetMessagingProxy()->GetParamInfoListForProcessor(name);
+}
+
+bool AudioWorklet::IsProcessorRegistered(const String& name) {
+ DCHECK(IsMainThread());
+ DCHECK(GetMessagingProxy());
+ return GetMessagingProxy()->IsProcessorRegistered(name);
+}
+
+bool AudioWorklet::IsReady() {
+ DCHECK(IsMainThread());
+ return GetMessagingProxy() && GetMessagingProxy()->GetBackingWorkerThread();
+}
+
// Worklet override: a new global scope is needed only when none exists yet.
bool AudioWorklet::NeedsToCreateGlobalScope() {
  // This is a callback from |Worklet::FetchAndInvokeScript| call, which only
  // can be triggered by Worklet.addModule() call.
  UseCounter::Count(GetExecutionContext(), WebFeature::kAudioWorkletAddModule);

  return GetNumberOfGlobalScopes() == 0;
}
+
+WorkletGlobalScopeProxy* AudioWorklet::CreateGlobalScope() {
+ DCHECK_EQ(GetNumberOfGlobalScopes(), 0u);
+
+ AudioWorkletMessagingProxy* proxy =
+ new AudioWorkletMessagingProxy(GetExecutionContext(), this);
+ proxy->Initialize(WorkerClients::Create(), ModuleResponsesMap());
+ return proxy;
+}
+
+AudioWorkletMessagingProxy* AudioWorklet::GetMessagingProxy() {
+ return GetNumberOfGlobalScopes() == 0
+ ? nullptr
+ : static_cast<AudioWorkletMessagingProxy*>(
+ FindAvailableGlobalScope());
+}
+
// Oilpan tracing: visit the owned context, then the base class's members.
void AudioWorklet::Trace(blink::Visitor* visitor) {
  visitor->Trace(context_);
  Worklet::Trace(visitor);
}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.h
new file mode 100644
index 00000000000..98b6c4672da
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.h
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_H_
+
+#include "third_party/blink/renderer/core/workers/worklet.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+
+namespace blink {
+
+class AudioWorkletHandler;
+class AudioWorkletMessagingProxy;
+class BaseAudioContext;
+class CrossThreadAudioParamInfo;
+class MessagePortChannel;
+class SerializedScriptValue;
+
// Main-thread representation of a BaseAudioContext's AudioWorklet. Owns the
// link (via AudioWorkletMessagingProxy) to the AudioWorkletGlobalScope.
class MODULES_EXPORT AudioWorklet final : public Worklet {
  DEFINE_WRAPPERTYPEINFO();
  USING_GARBAGE_COLLECTED_MIXIN(AudioWorklet);
  WTF_MAKE_NONCOPYABLE(AudioWorklet);

 public:
  // NOTE(review): a previous comment claimed this returns |nullptr| when the
  // AudioWorklet runtime flag is not enabled, but Create() in the .cc always
  // returns a new instance — confirm where the flag check actually lives.
  static AudioWorklet* Create(BaseAudioContext*);

  ~AudioWorklet() = default;

  // Forwards a processor-construction request to the messaging proxy.
  void CreateProcessor(AudioWorkletHandler*,
                       MessagePortChannel,
                       scoped_refptr<SerializedScriptValue> node_options);

  // Invoked by AudioWorkletMessagingProxy. Notifies |context_| when
  // AudioWorkletGlobalScope finishes the first script evaluation and is ready
  // for the worklet operation. Can be used for other post-evaluation tasks
  // in AudioWorklet or BaseAudioContext.
  void NotifyGlobalScopeIsUpdated();

  BaseAudioContext* GetBaseAudioContext() const;

  // Returns |nullptr| if there is no active WorkletGlobalScope().
  AudioWorkletMessagingProxy* GetMessagingProxy();

  // AudioParam metadata registered for the processor named |name|.
  const Vector<CrossThreadAudioParamInfo> GetParamInfoListForProcessor(
      const String& name);

  bool IsProcessorRegistered(const String& name);

  // Returns |true| when a AudioWorkletMessagingProxy and a WorkletBackingThread
  // are ready.
  bool IsReady();

  void Trace(blink::Visitor*) override;

 private:
  explicit AudioWorklet(BaseAudioContext*);

  // Implements Worklet
  bool NeedsToCreateGlobalScope() final;
  WorkletGlobalScopeProxy* CreateGlobalScope() final;

  // To catch the first global scope update and notify the context.
  bool worklet_started_ = false;

  Member<BaseAudioContext> context_;
};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.idl
new file mode 100644
index 00000000000..e24cf629d2e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet.idl
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audioworklet
+
[
    SecureContext
] interface AudioWorklet : Worklet {
    // No members of its own; everything is inherited from Worklet.
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc
new file mode 100644
index 00000000000..b24c49d5858
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.cc
@@ -0,0 +1,410 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+
+#include <memory>
+#include <utility>
+
+#include "third_party/blink/renderer/bindings/core/v8/idl_types.h"
+#include "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h"
+#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
+#include "third_party/blink/renderer/bindings/core/v8/to_v8_for_core.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_object_builder.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_object_parser.h"
+#include "third_party/blink/renderer/bindings/core/v8/worker_or_worklet_script_controller.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_param_descriptor.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_audio_worklet_processor.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/messaging/message_port.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h"
+#include "third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/bindings/v8_binding_macros.h"
+#include "third_party/blink/renderer/platform/bindings/v8_object_constructor.h"
+#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
+
+namespace blink {
+
// Factory: constructs the worklet global scope on the given worker thread.
AudioWorkletGlobalScope* AudioWorkletGlobalScope::Create(
    std::unique_ptr<GlobalScopeCreationParams> creation_params,
    v8::Isolate* isolate,
    WorkerThread* thread) {
  return new AudioWorkletGlobalScope(std::move(creation_params), isolate,
                                     thread);
}
+
// Everything is delegated to the ThreadedWorkletGlobalScope base; this class
// only adds processor registration/creation state.
AudioWorkletGlobalScope::AudioWorkletGlobalScope(
    std::unique_ptr<GlobalScopeCreationParams> creation_params,
    v8::Isolate* isolate,
    WorkerThread* thread)
    : ThreadedWorkletGlobalScope(std::move(creation_params), isolate, thread) {}
+
// Defaulted: members clean themselves up via their own destructors.
AudioWorkletGlobalScope::~AudioWorkletGlobalScope() = default;
+
// Tears down the global scope. |is_closing_| is raised before the base-class
// Dispose() so code running during teardown can observe the shutdown state.
void AudioWorkletGlobalScope::Dispose() {
  DCHECK(IsContextThread());
  is_closing_ = true;
  ThreadedWorkletGlobalScope::Dispose();
}
+
// Implements AudioWorkletGlobalScope.registerProcessor(name, classDefinition):
// validates |name|, extracts constructor/process/parameterDescriptors from the
// user-supplied class, and stores the resulting definition under |name|.
// Throws NotSupportedError for duplicate names and TypeError for an empty
// name; parsing errors surface through |exception_state|.
void AudioWorkletGlobalScope::registerProcessor(
    const String& name,
    const ScriptValue& class_definition,
    ExceptionState& exception_state) {
  DCHECK(IsContextThread());

  // Registering the same name twice is an error.
  if (processor_definition_map_.Contains(name)) {
    exception_state.ThrowDOMException(
        kNotSupportedError,
        "A class with name:'" + name + "' is already registered.");
    return;
  }

  // TODO(hongchan): this is not stated in the spec, but seems necessary.
  // https://github.com/WebAudio/web-audio-api/issues/1172
  if (name.IsEmpty()) {
    exception_state.ThrowTypeError("The empty string is not a valid name.");
    return;
  }

  v8::Isolate* isolate = ScriptController()->GetScriptState()->GetIsolate();
  v8::Local<v8::Context> context = ScriptController()->GetContext();

  // Bindings guarantee the definition is a function (the class constructor).
  DCHECK(class_definition.V8Value()->IsFunction());
  v8::Local<v8::Function> constructor =
      v8::Local<v8::Function>::Cast(class_definition.V8Value());

  // The class must expose a prototype object with a process() method.
  v8::Local<v8::Object> prototype;
  if (!V8ObjectParser::ParsePrototype(context, constructor, &prototype,
                                      &exception_state))
    return;

  v8::Local<v8::Function> process;
  if (!V8ObjectParser::ParseFunction(context, prototype, "process", &process,
                                     &exception_state))
    return;

  // constructor() and process() functions are successfully parsed from the
  // script code, thus create the definition. The rest of parsing process
  // (i.e. parameterDescriptors) is optional.
  AudioWorkletProcessorDefinition* definition =
      AudioWorkletProcessorDefinition::Create(isolate, name, constructor,
                                              process);
  DCHECK(definition);

  // Static |parameterDescriptors| property on the class is optional.
  v8::Local<v8::Value> parameter_descriptors_value_local;
  bool did_get_parameter_descriptor =
      constructor->Get(context, V8AtomicString(isolate, "parameterDescriptors"))
          .ToLocal(&parameter_descriptors_value_local);

  // If parameterDescriptors is present and non-null, convert it into a vector
  // of |AudioParamDescriptor| and hand it to the definition.
  if (did_get_parameter_descriptor &&
      !parameter_descriptors_value_local->IsNullOrUndefined()) {
    HeapVector<AudioParamDescriptor> audio_param_descriptors =
        NativeValueTraits<IDLSequence<AudioParamDescriptor>>::NativeValue(
            isolate, parameter_descriptors_value_local, exception_state);

    if (exception_state.HadException())
      return;

    definition->SetAudioParamDescriptors(audio_param_descriptors);
  }

  processor_definition_map_.Set(name, definition);
}
+
// Instantiates the registered processor class |name| by invoking its JS
// constructor with the deserialized |node_options| as the single argument.
// Returns nullptr if the construction failed, threw, or produced an object
// that is not an AudioWorkletProcessor.
AudioWorkletProcessor* AudioWorkletGlobalScope::CreateProcessor(
    const String& name,
    MessagePortChannel message_port_channel,
    scoped_refptr<SerializedScriptValue> node_options) {
  DCHECK(IsContextThread());

  // The registered definition is already checked by AudioWorkletNode
  // construction process, so the |definition| here must be valid.
  AudioWorkletProcessorDefinition* definition = FindDefinition(name);
  DCHECK(definition);

  ScriptState* script_state = ScriptController()->GetScriptState();
  ScriptState::Scope scope(script_state);

  // V8 object instance construction: this construction process is here to make
  // the AudioWorkletProcessor class a thin wrapper of V8::Object instance.
  v8::Isolate* isolate = script_state->GetIsolate();
  v8::TryCatch block(isolate);

  // Routes errors/exceptions to the dev console.
  block.SetVerbose(true);

  // Stash the extra construction state where the static AudioWorkletProcessor
  // constructor can find it (see comment below); must not already be set.
  DCHECK(!processor_creation_params_);
  processor_creation_params_ = std::make_unique<ProcessorCreationParams>(
      name, std::move(message_port_channel));

  v8::Local<v8::Value> argv[] = {
    ToV8(node_options->Deserialize(isolate),
         script_state->GetContext()->Global(),
         isolate)
  };

  // This invokes the static constructor of AudioWorkletProcessor. There is no
  // way to pass additional constructor arguments that are not described in
  // WebIDL, the static constructor will look up |processor_creation_params_| in
  // the global scope to perform the construction properly.
  v8::Local<v8::Value> result;
  bool did_construct =
      V8ScriptRunner::CallAsConstructor(isolate,
                                        definition->ConstructorLocal(isolate),
                                        ExecutionContext::From(script_state),
                                        WTF_ARRAY_LENGTH(argv),
                                        argv)
          .ToLocal(&result);
  processor_creation_params_.reset();

  // Bail out if 1) the attempt to call the constructor failed, or 2) an error
  // was thrown by the user-supplied constructor code; in either case the
  // construction is invalid and no processor is created.
  if (!did_construct || block.HasCaught()) {
    return nullptr;
  }

  // ToImplWithTypeCheck() may return nullptr when the type does not match.
  AudioWorkletProcessor* processor =
      V8AudioWorkletProcessor::ToImplWithTypeCheck(isolate, result);

  if (processor) {
    processor_instances_.push_back(processor);
  }

  return processor;
}
+
+bool AudioWorkletGlobalScope::Process(
+ AudioWorkletProcessor* processor,
+ Vector<AudioBus*>* input_buses,
+ Vector<AudioBus*>* output_buses,
+ HashMap<String, std::unique_ptr<AudioFloatArray>>* param_value_map) {
+ CHECK_GE(input_buses->size(), 0u);
+ CHECK_GE(output_buses->size(), 0u);
+
+ ScriptState* script_state = ScriptController()->GetScriptState();
+ ScriptState::Scope scope(script_state);
+
+ v8::Isolate* isolate = script_state->GetIsolate();
+ v8::Local<v8::Context> current_context = script_state->GetContext();
+ AudioWorkletProcessorDefinition* definition =
+ FindDefinition(processor->Name());
+ DCHECK(definition);
+
+ v8::TryCatch block(isolate);
+ block.SetVerbose(true);
+
+ // Prepare arguments of JS callback (inputs, outputs and param_values) with
+ // directly using V8 API because the overhead of
+ // ToV8(HeapVector<HeapVector<DOMFloat32Array>>) is not negligible and there
+ // is no need to externalize the array buffers.
+
+ // 1st arg of JS callback: inputs
+ v8::Local<v8::Array> inputs = v8::Array::New(isolate, input_buses->size());
+ uint32_t input_bus_index = 0;
+ for (const auto input_bus : *input_buses) {
+ // If |input_bus| is null, then the input is not connected, and
+ // the array for that input should have one channel and a length
+ // of 0.
+ unsigned number_of_channels = input_bus ? input_bus->NumberOfChannels() : 1;
+ size_t bus_length = input_bus ? input_bus->length() : 0;
+
+ v8::Local<v8::Array> channels = v8::Array::New(isolate, number_of_channels);
+ bool success;
+ if (!inputs
+ ->CreateDataProperty(current_context, input_bus_index++, channels)
+ .To(&success)) {
+ return false;
+ }
+ for (uint32_t channel_index = 0; channel_index < number_of_channels;
+ ++channel_index) {
+ v8::Local<v8::ArrayBuffer> array_buffer =
+ v8::ArrayBuffer::New(isolate, bus_length * sizeof(float));
+ v8::Local<v8::Float32Array> float32_array =
+ v8::Float32Array::New(array_buffer, 0, bus_length);
+ if (!channels
+ ->CreateDataProperty(current_context, channel_index,
+ float32_array)
+ .To(&success)) {
+ return false;
+ }
+ const v8::ArrayBuffer::Contents& contents = array_buffer->GetContents();
+ if (input_bus) {
+ memcpy(contents.Data(), input_bus->Channel(channel_index)->Data(),
+ bus_length * sizeof(float));
+ }
+ }
+ }
+
+ // 2nd arg of JS callback: outputs
+ v8::Local<v8::Array> outputs = v8::Array::New(isolate, output_buses->size());
+ uint32_t output_bus_index = 0;
+ // |js_output_raw_ptrs| stores raw pointers to underlying array buffers so
+ // that we can copy them back to |output_buses|. The raw pointers are valid
+ // as long as the v8::ArrayBuffers are alive, i.e. as long as |outputs| is
+ // holding v8::ArrayBuffers.
+ Vector<Vector<void*>> js_output_raw_ptrs;
+ js_output_raw_ptrs.ReserveInitialCapacity(output_buses->size());
+ for (const auto& output_bus : *output_buses) {
+ js_output_raw_ptrs.UncheckedAppend(Vector<void*>());
+ js_output_raw_ptrs.back().ReserveInitialCapacity(
+ output_bus->NumberOfChannels());
+ v8::Local<v8::Array> channels =
+ v8::Array::New(isolate, output_bus->NumberOfChannels());
+ bool success;
+ if (!outputs
+ ->CreateDataProperty(current_context, output_bus_index++, channels)
+ .To(&success)) {
+ return false;
+ }
+ for (uint32_t channel_index = 0;
+ channel_index < output_bus->NumberOfChannels(); ++channel_index) {
+ v8::Local<v8::ArrayBuffer> array_buffer =
+ v8::ArrayBuffer::New(isolate, output_bus->length() * sizeof(float));
+ v8::Local<v8::Float32Array> float32_array =
+ v8::Float32Array::New(array_buffer, 0, output_bus->length());
+ if (!channels
+ ->CreateDataProperty(current_context, channel_index,
+ float32_array)
+ .To(&success)) {
+ return false;
+ }
+ const v8::ArrayBuffer::Contents& contents = array_buffer->GetContents();
+ js_output_raw_ptrs.back().UncheckedAppend(contents.Data());
+ }
+ }
+
+ // 3rd arg of JS callback: param_values
+ v8::Local<v8::Object> param_values = v8::Object::New(isolate);
+ for (const auto& param : *param_value_map) {
+ const String& param_name = param.key;
+ const AudioFloatArray* param_array = param.value.get();
+ v8::Local<v8::ArrayBuffer> array_buffer =
+ v8::ArrayBuffer::New(isolate, param_array->size() * sizeof(float));
+ v8::Local<v8::Float32Array> float32_array =
+ v8::Float32Array::New(array_buffer, 0, param_array->size());
+ bool success;
+ if (!param_values
+ ->CreateDataProperty(current_context,
+ V8String(isolate, param_name.IsolatedCopy()),
+ float32_array)
+ .To(&success)) {
+ return false;
+ }
+ const v8::ArrayBuffer::Contents& contents = array_buffer->GetContents();
+ memcpy(contents.Data(), param_array->Data(),
+ param_array->size() * sizeof(float));
+ }
+
+ v8::Local<v8::Value> argv[] = {inputs, outputs, param_values};
+
+ // Perform JS function process() in AudioWorkletProcessor instance. The actual
+ // V8 operation happens here to make the AudioWorkletProcessor class a thin
+ // wrapper of v8::Object instance.
+ v8::Local<v8::Value> processor_handle = ToV8(processor, script_state);
+ v8::Local<v8::Value> local_result;
+ if (!V8ScriptRunner::CallFunction(definition->ProcessLocal(isolate),
+ ExecutionContext::From(script_state),
+ processor_handle, WTF_ARRAY_LENGTH(argv),
+ argv, isolate)
+ .ToLocal(&local_result) ||
+ block.HasCaught()) {
+    // process() method call failed for some reason or an exception
+ // was thrown by the user supplied code. Disable the processor to exclude
+ // it from the subsequent rendering task.
+ processor->SetErrorState(AudioWorkletProcessorErrorState::kProcessError);
+ return false;
+ }
+
+ // TODO(hongchan): Sanity check on length, number of channels, and object
+ // type.
+
+ // Copy |sequence<sequence<Float32Array>>| back to the original
+ // |Vector<AudioBus*>|.
+ for (uint32_t output_bus_index = 0; output_bus_index < output_buses->size();
+ ++output_bus_index) {
+ AudioBus* output_bus = (*output_buses)[output_bus_index];
+ for (uint32_t channel_index = 0;
+ channel_index < output_bus->NumberOfChannels(); ++channel_index) {
+ memcpy(output_bus->Channel(channel_index)->MutableData(),
+ js_output_raw_ptrs[output_bus_index][channel_index],
+ output_bus->length() * sizeof(float));
+ }
+ }
+
+ // Return the value from the user-supplied |process()| function. It is
+ // used to maintain the lifetime of the node and the processor.
+ return local_result->IsTrue() && !block.HasCaught();
+}
+
+AudioWorkletProcessorDefinition* AudioWorkletGlobalScope::FindDefinition(
+ const String& name) {
+ return processor_definition_map_.at(name);
+}
+
+unsigned AudioWorkletGlobalScope::NumberOfRegisteredDefinitions() {
+ return processor_definition_map_.size();
+}
+
+std::unique_ptr<Vector<CrossThreadAudioWorkletProcessorInfo>>
+AudioWorkletGlobalScope::WorkletProcessorInfoListForSynchronization() {
+ auto processor_info_list =
+ std::make_unique<Vector<CrossThreadAudioWorkletProcessorInfo>>();
+ for (auto definition_entry : processor_definition_map_) {
+ if (!definition_entry.value->IsSynchronized()) {
+ definition_entry.value->MarkAsSynchronized();
+ processor_info_list->emplace_back(*definition_entry.value);
+ }
+ }
+ return processor_info_list;
+}
+
+ProcessorCreationParams* AudioWorkletGlobalScope::GetProcessorCreationParams() {
+ return processor_creation_params_.get();
+}
+
+void AudioWorkletGlobalScope::SetCurrentFrame(size_t current_frame) {
+ current_frame_ = current_frame;
+}
+
+void AudioWorkletGlobalScope::SetSampleRate(float sample_rate) {
+ sample_rate_ = sample_rate;
+}
+
+double AudioWorkletGlobalScope::currentTime() const {
+ return sample_rate_ > 0.0
+ ? current_frame_ / static_cast<double>(sample_rate_)
+ : 0.0;
+}
+
+void AudioWorkletGlobalScope::Trace(blink::Visitor* visitor) {
+ visitor->Trace(processor_definition_map_);
+ visitor->Trace(processor_instances_);
+ ThreadedWorkletGlobalScope::Trace(visitor);
+}
+
+void AudioWorkletGlobalScope::TraceWrappers(
+ const ScriptWrappableVisitor* visitor) const {
+ for (auto definition : processor_definition_map_)
+ visitor->TraceWrappers(definition.value);
+
+ for (auto processor : processor_instances_)
+ visitor->TraceWrappers(processor);
+
+ ThreadedWorkletGlobalScope::TraceWrappers(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h
new file mode 100644
index 00000000000..a83ea97a3b6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h
@@ -0,0 +1,141 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_GLOBAL_SCOPE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_GLOBAL_SCOPE_H_
+
+#include "third_party/blink/renderer/bindings/core/v8/script_value.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/core/workers/threaded_worklet_global_scope.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
+#include "third_party/blink/renderer/platform/audio/audio_array.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+
+namespace blink {
+
+class AudioBus;
+class AudioWorkletProcessor;
+class AudioWorkletProcessorDefinition;
+class CrossThreadAudioWorkletProcessorInfo;
+class ExceptionState;
+class MessagePortChannel;
+class SerializedScriptValue;
+struct GlobalScopeCreationParams;
+
+
+// The storage for the construction of AudioWorkletProcessor, contains the
+// processor name and MessageChannelPort object.
+class MODULES_EXPORT ProcessorCreationParams final {
+ public:
+ ProcessorCreationParams(const String& name,
+ MessagePortChannel message_port_channel)
+ : name_(name), message_port_channel_(message_port_channel) {}
+
+ ~ProcessorCreationParams() = default;
+
+ const String& Name() const { return name_; }
+ MessagePortChannel PortChannel() { return message_port_channel_; }
+
+ private:
+ const String name_;
+ MessagePortChannel message_port_channel_;
+};
+
+
+// This is constructed and destroyed on a worker thread, and all methods also
+// must be called on the worker thread.
+class MODULES_EXPORT AudioWorkletGlobalScope final
+ : public ThreadedWorkletGlobalScope {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static AudioWorkletGlobalScope* Create(
+ std::unique_ptr<GlobalScopeCreationParams>,
+ v8::Isolate*,
+ WorkerThread*);
+ ~AudioWorkletGlobalScope() override;
+ bool IsAudioWorkletGlobalScope() const final { return true; }
+ void Dispose() final;
+ bool IsClosing() const final { return is_closing_; }
+
+ void registerProcessor(const String& name,
+ const ScriptValue& class_definition,
+ ExceptionState&);
+
+ // Creates an instance of AudioWorkletProcessor from a registered name.
+ // This is invoked by AudioWorkletMessagingProxy upon the construction of
+ // AudioWorkletNode.
+ //
+ // This function may return nullptr when a new V8 object cannot be constructed
+ // for some reason.
+ AudioWorkletProcessor* CreateProcessor(
+ const String& name,
+ MessagePortChannel,
+ scoped_refptr<SerializedScriptValue> node_options);
+
+ // Invokes the JS audio processing function from an instance of
+ // AudioWorkletProcessor, along with given AudioBuffer from the audio graph.
+ bool Process(
+ AudioWorkletProcessor*,
+ Vector<AudioBus*>* input_buses,
+ Vector<AudioBus*>* output_buses,
+ HashMap<String, std::unique_ptr<AudioFloatArray>>* param_value_map);
+
+ AudioWorkletProcessorDefinition* FindDefinition(const String& name);
+
+ unsigned NumberOfRegisteredDefinitions();
+
+ std::unique_ptr<Vector<CrossThreadAudioWorkletProcessorInfo>>
+ WorkletProcessorInfoListForSynchronization();
+
+ // Gets |processor_creation_params_| for the processor construction. If there
+ // is no on-going processor construction, this MUST return nullptr.
+ ProcessorCreationParams* GetProcessorCreationParams();
+
+ void SetCurrentFrame(size_t current_frame);
+ void SetSampleRate(float sample_rate);
+
+ // IDL
+ unsigned long long currentFrame() const { return current_frame_; }
+ double currentTime() const;
+ float sampleRate() const { return sample_rate_; }
+
+ void Trace(blink::Visitor*);
+ void TraceWrappers(const ScriptWrappableVisitor*) const;
+
+ private:
+ AudioWorkletGlobalScope(std::unique_ptr<GlobalScopeCreationParams>,
+ v8::Isolate*,
+ WorkerThread*);
+
+ bool is_closing_ = false;
+
+ typedef HeapHashMap<String,
+ TraceWrapperMember<AudioWorkletProcessorDefinition>>
+ ProcessorDefinitionMap;
+ typedef HeapVector<TraceWrapperMember<AudioWorkletProcessor>>
+ ProcessorInstances;
+
+ ProcessorDefinitionMap processor_definition_map_;
+ ProcessorInstances processor_instances_;
+
+ // Gets set when the processor construction is invoked, and cleared out after
+ // the construction. See the comment in |CreateProcessor()| method for the
+ // detail.
+ std::unique_ptr<ProcessorCreationParams> processor_creation_params_;
+
+ size_t current_frame_ = 0;
+ float sample_rate_ = 0.0;
+};
+
+DEFINE_TYPE_CASTS(AudioWorkletGlobalScope,
+ ExecutionContext,
+ context,
+ context->IsAudioWorkletGlobalScope(),
+ context.IsAudioWorkletGlobalScope());
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_GLOBAL_SCOPE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.idl
new file mode 100644
index 00000000000..ccdbf87c183
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.idl
@@ -0,0 +1,15 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audioworkletglobalscope
+
+[
+ Exposed=AudioWorklet,
+ Global=(Worklet,AudioWorklet)
+] interface AudioWorkletGlobalScope : WorkletGlobalScope {
+ [RaisesException, MeasureAs=AudioWorkletGlobalScopeRegisterProcessor] void registerProcessor(DOMString name, Function processorConstructor);
+ readonly attribute unsigned long long currentFrame;
+ readonly attribute double currentTime;
+ readonly attribute float sampleRate;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc
new file mode 100644
index 00000000000..a7ef777545d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope_test.cc
@@ -0,0 +1,399 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/public/platform/web_url_request.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_module.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_source_code.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_value.h"
+#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
+#include "third_party/blink/renderer/bindings/core/v8/source_location.h"
+#include "third_party/blink/renderer/bindings/core/v8/to_v8_for_core.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_cache_options.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_gc_controller.h"
+#include "third_party/blink/renderer/bindings/core/v8/worker_or_worklet_script_controller.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/messaging/message_channel.h"
+#include "third_party/blink/renderer/core/messaging/message_port.h"
+#include "third_party/blink/renderer/core/origin_trials/origin_trial_context.h"
+#include "third_party/blink/renderer/core/testing/page_test_base.h"
+#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
+#include "third_party/blink/renderer/core/workers/worker_backing_thread.h"
+#include "third_party/blink/renderer/core/workers/worker_inspector_proxy.h"
+#include "third_party/blink/renderer/core/workers/worker_reporting_proxy.h"
+#include "third_party/blink/renderer/core/workers/worklet_module_responses_map.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/bindings/script_state.h"
+#include "third_party/blink/renderer/platform/bindings/v8_binding_macros.h"
+#include "third_party/blink/renderer/platform/bindings/v8_object_constructor.h"
+#include "third_party/blink/renderer/platform/loader/fetch/access_control_status.h"
+#include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h"
+#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
+#include "third_party/blink/renderer/platform/wtf/text/text_position.h"
+
+namespace blink {
+
+namespace {
+
+static const size_t kRenderQuantumFrames = 128;
+
+} // namespace
+
+class AudioWorkletGlobalScopeTest : public PageTestBase {
+ public:
+ void SetUp() override {
+ AudioWorkletThread::CreateSharedBackingThreadForTest();
+ PageTestBase::SetUp(IntSize());
+ Document* document = &GetDocument();
+ document->SetURL(KURL("https://example.com/"));
+ document->UpdateSecurityOrigin(SecurityOrigin::Create(document->Url()));
+ reporting_proxy_ = std::make_unique<WorkerReportingProxy>();
+ }
+
+ std::unique_ptr<AudioWorkletThread> CreateAudioWorkletThread() {
+ std::unique_ptr<AudioWorkletThread> thread =
+ AudioWorkletThread::Create(nullptr, *reporting_proxy_);
+ Document* document = &GetDocument();
+ thread->Start(
+ std::make_unique<GlobalScopeCreationParams>(
+ document->Url(), document->UserAgent(),
+ nullptr /* content_security_policy_parsed_headers */,
+ document->GetReferrerPolicy(), document->GetSecurityOrigin(),
+ document->IsSecureContext(), nullptr /* worker_clients */,
+ document->AddressSpace(),
+ OriginTrialContext::GetTokens(document).get(),
+ base::UnguessableToken::Create(), nullptr /* worker_settings */,
+ kV8CacheOptionsDefault,
+ new WorkletModuleResponsesMap(document->Fetcher())),
+ WTF::nullopt, WorkerInspectorProxy::PauseOnWorkerStart::kDontPause,
+ ParentExecutionContextTaskRunners::Create());
+ return thread;
+ }
+
+ void RunBasicTest(WorkerThread* thread) {
+ WaitableEvent waitable_event;
+ PostCrossThreadTask(
+ *thread->GetTaskRunner(TaskType::kInternalTest), FROM_HERE,
+ CrossThreadBind(
+ &AudioWorkletGlobalScopeTest::RunBasicTestOnWorkletThread,
+ CrossThreadUnretained(this), CrossThreadUnretained(thread),
+ CrossThreadUnretained(&waitable_event)));
+ waitable_event.Wait();
+ }
+
+ void RunSimpleProcessTest(WorkerThread* thread) {
+ WaitableEvent waitable_event;
+ PostCrossThreadTask(
+ *thread->GetTaskRunner(TaskType::kInternalTest), FROM_HERE,
+ CrossThreadBind(
+ &AudioWorkletGlobalScopeTest::RunSimpleProcessTestOnWorkletThread,
+ CrossThreadUnretained(this), CrossThreadUnretained(thread),
+ CrossThreadUnretained(&waitable_event)));
+ waitable_event.Wait();
+ }
+
+ void RunParsingTest(WorkerThread* thread) {
+ WaitableEvent waitable_event;
+ PostCrossThreadTask(
+ *thread->GetTaskRunner(TaskType::kInternalTest), FROM_HERE,
+ CrossThreadBind(
+ &AudioWorkletGlobalScopeTest::RunParsingTestOnWorkletThread,
+ CrossThreadUnretained(this), CrossThreadUnretained(thread),
+ CrossThreadUnretained(&waitable_event)));
+ waitable_event.Wait();
+ }
+
+ void RunParsingParameterDescriptorTest(WorkerThread* thread) {
+ WaitableEvent waitable_event;
+ PostCrossThreadTask(
+ *thread->GetTaskRunner(TaskType::kInternalTest), FROM_HERE,
+ CrossThreadBind(&AudioWorkletGlobalScopeTest::
+ RunParsingParameterDescriptorTestOnWorkletThread,
+ CrossThreadUnretained(this),
+ CrossThreadUnretained(thread),
+ CrossThreadUnretained(&waitable_event)));
+ waitable_event.Wait();
+ }
+
+ private:
+ // Returns false when a script evaluation error happens.
+ bool EvaluateScriptModule(AudioWorkletGlobalScope* global_scope,
+ const String& source_code) {
+ ScriptState* script_state =
+ global_scope->ScriptController()->GetScriptState();
+ EXPECT_TRUE(script_state);
+ KURL js_url("https://example.com/worklet.js");
+ ScriptModule module = ScriptModule::Compile(
+ script_state->GetIsolate(), source_code, js_url, js_url,
+ ScriptFetchOptions(), kSharableCrossOrigin,
+ TextPosition::MinimumPosition(), ASSERT_NO_EXCEPTION);
+ EXPECT_FALSE(module.IsNull());
+ ScriptValue exception = module.Instantiate(script_state);
+ EXPECT_TRUE(exception.IsEmpty());
+ ScriptValue value = module.Evaluate(script_state);
+ return value.IsEmpty();
+ }
+
+ // Test if AudioWorkletGlobalScope and V8 components (ScriptState, Isolate)
+ // are properly instantiated. Runs a simple processor registration and check
+ // if the class definition is correctly registered, then instantiate an
+ // AudioWorkletProcessor instance from the definition.
+ void RunBasicTestOnWorkletThread(WorkerThread* thread,
+ WaitableEvent* wait_event) {
+ EXPECT_TRUE(thread->IsCurrentThread());
+
+ auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+
+ ScriptState* script_state =
+ global_scope->ScriptController()->GetScriptState();
+ EXPECT_TRUE(script_state);
+
+ v8::Isolate* isolate = script_state->GetIsolate();
+ EXPECT_TRUE(isolate);
+
+ ScriptState::Scope scope(script_state);
+
+ String source_code =
+ R"JS(
+ class TestProcessor extends AudioWorkletProcessor {
+ constructor () { super(); }
+ process () {}
+ }
+ registerProcessor('testProcessor', TestProcessor);
+ )JS";
+ ASSERT_TRUE(EvaluateScriptModule(global_scope, source_code));
+
+ AudioWorkletProcessorDefinition* definition =
+ global_scope->FindDefinition("testProcessor");
+ EXPECT_TRUE(definition);
+ EXPECT_EQ(definition->GetName(), "testProcessor");
+ EXPECT_TRUE(definition->ConstructorLocal(isolate)->IsFunction());
+ EXPECT_TRUE(definition->ProcessLocal(isolate)->IsFunction());
+ MessageChannel* channel = MessageChannel::Create(thread->GlobalScope());
+ MessagePortChannel dummy_port_channel = channel->port2()->Disentangle();
+
+ AudioWorkletProcessor* processor =
+ global_scope->CreateProcessor("testProcessor",
+ dummy_port_channel,
+ SerializedScriptValue::NullValue());
+ EXPECT_TRUE(processor);
+ EXPECT_EQ(processor->Name(), "testProcessor");
+ v8::Local<v8::Value> processor_value =
+ ToV8(processor, script_state->GetContext()->Global(), isolate);
+ EXPECT_TRUE(processor_value->IsObject());
+
+ wait_event->Signal();
+ }
+
+ // Test if various class definition patterns are parsed correctly.
+ void RunParsingTestOnWorkletThread(WorkerThread* thread,
+ WaitableEvent* wait_event) {
+ EXPECT_TRUE(thread->IsCurrentThread());
+
+ auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+
+ ScriptState* script_state =
+ global_scope->ScriptController()->GetScriptState();
+ EXPECT_TRUE(script_state);
+
+ ScriptState::Scope scope(script_state);
+
+ {
+ // registerProcessor() with a valid class definition should define a
+ // processor. Note that these classes will fail at the construction time
+ // because they're not valid AudioWorkletProcessor.
+ String source_code =
+ R"JS(
+ var class1 = function () {};
+ class1.prototype.process = function () {};
+ registerProcessor('class1', class1);
+
+ var class2 = function () {};
+ class2.prototype = { process: function () {} };
+ registerProcessor('class2', class2);
+ )JS";
+ ASSERT_TRUE(EvaluateScriptModule(global_scope, source_code));
+ EXPECT_TRUE(global_scope->FindDefinition("class1"));
+ EXPECT_TRUE(global_scope->FindDefinition("class2"));
+ }
+
+ {
+ // registerProcessor() with an invalid class definition should fail to
+ // define a processor.
+ String source_code =
+ R"JS(
+ var class3 = function () {};
+ Object.defineProperty(class3, 'prototype', {
+ get: function () {
+ return {
+ process: function () {}
+ };
+ }
+ });
+ registerProcessor('class3', class3);
+ )JS";
+ ASSERT_FALSE(EvaluateScriptModule(global_scope, source_code));
+ EXPECT_FALSE(global_scope->FindDefinition("class3"));
+ }
+
+ wait_event->Signal();
+ }
+
+ // Test if the invocation of process() method in AudioWorkletProcessor and
+ // AudioWorkletGlobalScope is performed correctly.
+ void RunSimpleProcessTestOnWorkletThread(WorkerThread* thread,
+ WaitableEvent* wait_event) {
+ EXPECT_TRUE(thread->IsCurrentThread());
+
+ auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ ScriptState* script_state =
+ global_scope->ScriptController()->GetScriptState();
+
+ ScriptState::Scope scope(script_state);
+
+ String source_code =
+ R"JS(
+ class TestProcessor extends AudioWorkletProcessor {
+ constructor () {
+ super();
+ this.constant_ = 1;
+ }
+ process (inputs, outputs) {
+ let inputChannel = inputs[0][0];
+ let outputChannel = outputs[0][0];
+ for (let i = 0; i < outputChannel.length; ++i) {
+ outputChannel[i] = inputChannel[i] + this.constant_;
+ }
+ }
+ }
+ registerProcessor('testProcessor', TestProcessor);
+ )JS";
+ ASSERT_TRUE(EvaluateScriptModule(global_scope, source_code));
+
+ MessageChannel* channel = MessageChannel::Create(thread->GlobalScope());
+ MessagePortChannel dummy_port_channel = channel->port2()->Disentangle();
+ AudioWorkletProcessor* processor =
+ global_scope->CreateProcessor("testProcessor",
+ dummy_port_channel,
+ SerializedScriptValue::NullValue());
+ EXPECT_TRUE(processor);
+
+ Vector<AudioBus*> input_buses;
+ Vector<AudioBus*> output_buses;
+ HashMap<String, std::unique_ptr<AudioFloatArray>> param_data_map;
+ scoped_refptr<AudioBus> input_bus =
+ AudioBus::Create(1, kRenderQuantumFrames);
+ scoped_refptr<AudioBus> output_bus =
+ AudioBus::Create(1, kRenderQuantumFrames);
+ AudioChannel* input_channel = input_bus->Channel(0);
+ AudioChannel* output_channel = output_bus->Channel(0);
+
+ input_buses.push_back(input_bus.get());
+ output_buses.push_back(output_bus.get());
+
+ // Fill |input_channel| with 1 and zero out |output_bus|.
+ std::fill(input_channel->MutableData(),
+ input_channel->MutableData() + input_channel->length(), 1);
+ output_bus->Zero();
+
+ // Then invoke the process() method to perform JS buffer manipulation. The
+ // output buffer should contain a constant value of 2.
+ processor->Process(&input_buses, &output_buses, &param_data_map);
+ for (unsigned i = 0; i < output_channel->length(); ++i) {
+ EXPECT_EQ(output_channel->Data()[i], 2);
+ }
+
+ wait_event->Signal();
+ }
+
+ void RunParsingParameterDescriptorTestOnWorkletThread(
+ WorkerThread* thread,
+ WaitableEvent* wait_event) {
+ EXPECT_TRUE(thread->IsCurrentThread());
+
+ auto* global_scope = ToAudioWorkletGlobalScope(thread->GlobalScope());
+ ScriptState* script_state =
+ global_scope->ScriptController()->GetScriptState();
+
+ ScriptState::Scope scope(script_state);
+
+ String source_code =
+ R"JS(
+ class TestProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors () {
+ return [{
+ name: 'gain',
+ defaultValue: 0.707,
+ minValue: 0.0,
+ maxValue: 1.0
+ }];
+ }
+ constructor () { super(); }
+ process () {}
+ }
+ registerProcessor('testProcessor', TestProcessor);
+ )JS";
+ ASSERT_TRUE(EvaluateScriptModule(global_scope, source_code));
+
+ AudioWorkletProcessorDefinition* definition =
+ global_scope->FindDefinition("testProcessor");
+ EXPECT_TRUE(definition);
+ EXPECT_EQ(definition->GetName(), "testProcessor");
+
+ const Vector<String> param_names =
+ definition->GetAudioParamDescriptorNames();
+ EXPECT_EQ(param_names[0], "gain");
+
+ const AudioParamDescriptor* descriptor =
+ definition->GetAudioParamDescriptor(param_names[0]);
+ EXPECT_EQ(descriptor->defaultValue(), 0.707f);
+ EXPECT_EQ(descriptor->minValue(), 0.0f);
+ EXPECT_EQ(descriptor->maxValue(), 1.0f);
+
+ wait_event->Signal();
+ }
+
+ std::unique_ptr<WorkerReportingProxy> reporting_proxy_;
+};
+
+TEST_F(AudioWorkletGlobalScopeTest, Basic) {
+ std::unique_ptr<AudioWorkletThread> thread = CreateAudioWorkletThread();
+ RunBasicTest(thread.get());
+ thread->Terminate();
+ thread->WaitForShutdownForTesting();
+}
+
+TEST_F(AudioWorkletGlobalScopeTest, Parsing) {
+ std::unique_ptr<AudioWorkletThread> thread = CreateAudioWorkletThread();
+ RunParsingTest(thread.get());
+ thread->Terminate();
+ thread->WaitForShutdownForTesting();
+}
+
+TEST_F(AudioWorkletGlobalScopeTest, BufferProcessing) {
+ std::unique_ptr<AudioWorkletThread> thread = CreateAudioWorkletThread();
+ RunSimpleProcessTest(thread.get());
+ thread->Terminate();
+ thread->WaitForShutdownForTesting();
+}
+
+TEST_F(AudioWorkletGlobalScopeTest, ParsingParameterDescriptor) {
+ std::unique_ptr<AudioWorkletThread> thread = CreateAudioWorkletThread();
+ RunParsingParameterDescriptorTest(thread.get());
+ thread->Terminate();
+ thread->WaitForShutdownForTesting();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc
new file mode 100644
index 00000000000..996496a7b82
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.cc
@@ -0,0 +1,106 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
+#include "third_party/blink/renderer/core/messaging/message_port.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h"
+#include "third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h"
+
+namespace blink {
+
+AudioWorkletMessagingProxy::AudioWorkletMessagingProxy(
+ ExecutionContext* execution_context,
+ AudioWorklet* worklet)
+ : ThreadedWorkletMessagingProxy(execution_context), worklet_(worklet) {}
+
+void AudioWorkletMessagingProxy::CreateProcessor(
+ AudioWorkletHandler* handler,
+ MessagePortChannel message_port_channel,
+ scoped_refptr<SerializedScriptValue> node_options) {
+ DCHECK(IsMainThread());
+ PostCrossThreadTask(
+ *GetWorkerThread()->GetTaskRunner(TaskType::kMiscPlatformAPI), FROM_HERE,
+ CrossThreadBind(
+ &AudioWorkletMessagingProxy::CreateProcessorOnRenderingThread,
+ WrapCrossThreadPersistent(this),
+ CrossThreadUnretained(GetWorkerThread()),
+ CrossThreadUnretained(handler),
+ handler->Name(),
+ std::move(message_port_channel),
+ std::move(node_options)));
+}
+
+void AudioWorkletMessagingProxy::CreateProcessorOnRenderingThread(
+ WorkerThread* worker_thread,
+ AudioWorkletHandler* handler,
+ const String& name,
+ MessagePortChannel message_port_channel,
+ scoped_refptr<SerializedScriptValue> node_options) {
+ DCHECK(worker_thread->IsCurrentThread());
+ AudioWorkletGlobalScope* global_scope =
+ ToAudioWorkletGlobalScope(worker_thread->GlobalScope());
+ AudioWorkletProcessor* processor =
+ global_scope->CreateProcessor(name, message_port_channel, node_options);
+ handler->SetProcessorOnRenderThread(processor);
+}
+
+void AudioWorkletMessagingProxy::SynchronizeWorkletProcessorInfoList(
+ std::unique_ptr<Vector<CrossThreadAudioWorkletProcessorInfo>> info_list) {
+ DCHECK(IsMainThread());
+ for (auto& processor_info : *info_list) {
+ processor_info_map_.insert(processor_info.Name(),
+ processor_info.ParamInfoList());
+ }
+
+ // Notify AudioWorklet object that the global scope has been updated after the
+ // script evaluation.
+ worklet_->NotifyGlobalScopeIsUpdated();
+}
+
+bool AudioWorkletMessagingProxy::IsProcessorRegistered(
+ const String& name) const {
+ return processor_info_map_.Contains(name);
+}
+
+const Vector<CrossThreadAudioParamInfo>
+AudioWorkletMessagingProxy::GetParamInfoListForProcessor(
+ const String& name) const {
+ DCHECK(IsProcessorRegistered(name));
+ return processor_info_map_.at(name);
+}
+
+WorkerThread* AudioWorkletMessagingProxy::GetBackingWorkerThread() {
+ return GetWorkerThread();
+}
+
+std::unique_ptr<ThreadedWorkletObjectProxy>
+AudioWorkletMessagingProxy::CreateObjectProxy(
+ ThreadedWorkletMessagingProxy* messaging_proxy,
+ ParentExecutionContextTaskRunners* parent_execution_context_task_runners) {
+ return std::make_unique<AudioWorkletObjectProxy>(
+ static_cast<AudioWorkletMessagingProxy*>(messaging_proxy),
+ parent_execution_context_task_runners,
+ worklet_->GetBaseAudioContext()->sampleRate());
+}
+
+std::unique_ptr<WorkerThread> AudioWorkletMessagingProxy::CreateWorkerThread() {
+ return AudioWorkletThread::Create(CreateThreadableLoadingContext(),
+ WorkletObjectProxy());
+}
+
+void AudioWorkletMessagingProxy::Trace(Visitor* visitor) {
+ visitor->Trace(worklet_);
+ ThreadedWorkletMessagingProxy::Trace(visitor);
+}
+
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h
new file mode 100644
index 00000000000..4308120cac9
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_MESSAGING_PROXY_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_MESSAGING_PROXY_H_
+
+#include <memory>
+#include "third_party/blink/renderer/core/workers/threaded_worklet_messaging_proxy.h"
+
+namespace blink {
+
+class AudioWorklet;
+class AudioWorkletHandler;
+class CrossThreadAudioParamInfo;
+class CrossThreadAudioWorkletProcessorInfo;
+class ExecutionContext;
+class MessagePortChannel;
+class WorkerThread;
+
+// AudioWorkletMessagingProxy is a main thread interface for
+// AudioWorkletGlobalScope. The proxy communicates with the associated global
+// scope via AudioWorkletObjectProxy.
+class AudioWorkletMessagingProxy final : public ThreadedWorkletMessagingProxy {
+ public:
+  AudioWorkletMessagingProxy(ExecutionContext*, AudioWorklet*);
+
+  // Since the creation of AudioWorkletProcessor needs to be done on a
+  // different thread, this method is a wrapper for cross-thread task posting.
+  void CreateProcessor(AudioWorkletHandler*,
+                       MessagePortChannel,
+                       scoped_refptr<SerializedScriptValue> node_options);
+
+  // Invokes AudioWorkletGlobalScope to create an instance of
+  // AudioWorkletProcessor.
+  void CreateProcessorOnRenderingThread(
+      WorkerThread*,
+      AudioWorkletHandler*,
+      const String& name,
+      MessagePortChannel,
+      scoped_refptr<SerializedScriptValue> node_options);
+
+  // Invoked by AudioWorkletObjectProxy on AudioWorkletThread to fetch the
+  // information from AudioWorkletGlobalScope to AudioWorkletMessagingProxy
+  // after the script code evaluation. It copies the information about newly
+  // added AudioWorkletProcessor since the previous synchronization. (e.g.
+  // processor name and AudioParam list)
+  void SynchronizeWorkletProcessorInfoList(
+      std::unique_ptr<Vector<CrossThreadAudioWorkletProcessorInfo>>);
+
+  // Returns true if the processor with given name is registered in
+  // AudioWorkletGlobalScope.
+  bool IsProcessorRegistered(const String& name) const;
+
+  // Returns (by value) the cached AudioParam info list for a registered
+  // processor name.
+  const Vector<CrossThreadAudioParamInfo> GetParamInfoListForProcessor(
+      const String& name) const;
+
+  // Returns the WorkerThread object that backs the AudioWorkletThread
+  // instance.
+  WorkerThread* GetBackingWorkerThread();
+
+  void Trace(Visitor*);
+
+ private:
+  // Implements ThreadedWorkletMessagingProxy.
+  std::unique_ptr<ThreadedWorkletObjectProxy> CreateObjectProxy(
+      ThreadedWorkletMessagingProxy*,
+      ParentExecutionContextTaskRunners*) override;
+
+  std::unique_ptr<WorkerThread> CreateWorkerThread() override;
+
+  // Each entry consists of processor name and associated AudioParam list.
+  HashMap<String, Vector<CrossThreadAudioParamInfo>> processor_info_map_;
+
+  Member<AudioWorklet> worklet_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_MESSAGING_PROXY_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc
new file mode 100644
index 00000000000..6cf5fe5a144
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.cc
@@ -0,0 +1,383 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_node.h"
+
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/serialization/serialized_script_value.h"
+#include "third_party/blink/renderer/core/messaging/message_channel.h"
+#include "third_party/blink/renderer/core/messaging/message_port.h"
+#include "third_party/blink/renderer/modules/event_modules.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h"
+#include "third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/heap/persistent.h"
+
+namespace blink {
+
+// Constructed on the main thread. Sets up inputs/outputs per |options| and
+// pre-allocates per-AudioParam value buffers for the audio thread.
+AudioWorkletHandler::AudioWorkletHandler(
+    AudioNode& node,
+    float sample_rate,
+    String name,
+    HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
+    const AudioWorkletNodeOptions& options)
+    : AudioHandler(kNodeTypeAudioWorklet, node, sample_rate),
+      name_(name),
+      param_handler_map_(param_handler_map) {
+  DCHECK(IsMainThread());
+
+  // One render quantum's worth of samples per parameter, allocated up front
+  // so Process() never allocates on the audio thread.
+  for (const auto& param_name : param_handler_map_.Keys()) {
+    param_value_map_.Set(
+        param_name,
+        std::make_unique<AudioFloatArray>(
+            AudioUtilities::kRenderQuantumFrames));
+  }
+
+  for (unsigned i = 0; i < options.numberOfInputs(); ++i) {
+    AddInput();
+  }
+
+  // If |options.outputChannelCount| unspecified, all outputs are mono.
+  for (unsigned i = 0; i < options.numberOfOutputs(); ++i) {
+    unsigned long channel_count = options.hasOutputChannelCount()
+        ? options.outputChannelCount()[i]
+        : 1;
+    AddOutput(channel_count);
+  }
+
+  if (Context()->GetExecutionContext()) {
+    // Cross-thread tasks between AWN/AWP are okay to be throttled, thus
+    // kMiscPlatformAPI. It is for post-creation/destruction chores.
+    main_thread_task_runner_ = Context()->GetExecutionContext()->GetTaskRunner(
+        TaskType::kMiscPlatformAPI);
+    DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+  }
+
+  Initialize();
+}
+
+AudioWorkletHandler::~AudioWorkletHandler() {
+  // Mirrors the Initialize() call made at the end of the constructor.
+  Uninitialize();
+}
+
+// Factory returning a ref-counted handler; see the constructor for the
+// actual setup work.
+scoped_refptr<AudioWorkletHandler> AudioWorkletHandler::Create(
+    AudioNode& node,
+    float sample_rate,
+    String name,
+    HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
+    const AudioWorkletNodeOptions& options) {
+  return base::AdoptRef(new AudioWorkletHandler(node, sample_rate, name,
+                                                param_handler_map, options));
+}
+
+// Runs on the audio rendering thread once per render quantum: gathers the
+// input/output buses and parameter values, then delegates to the processor.
+void AudioWorkletHandler::Process(size_t frames_to_process) {
+  DCHECK(Context()->IsAudioThread());
+
+  // Render and update the node state when the processor is ready with no error.
+  // We also need to check if the global scope is valid before we request
+  // the rendering in the AudioWorkletGlobalScope.
+  if (processor_ && !processor_->hasErrorOccured()) {
+    Vector<AudioBus*> input_buses;
+    Vector<AudioBus*> output_buses;
+    for (unsigned i = 0; i < NumberOfInputs(); ++i) {
+      // If the input is not connected, inform the processor of that
+      // fact by setting the bus to null.
+      AudioBus* bus = Input(i).IsConnected() ? Input(i).Bus() : nullptr;
+      input_buses.push_back(bus);
+    }
+    for (unsigned i = 0; i < NumberOfOutputs(); ++i)
+      output_buses.push_back(Output(i).Bus());
+
+    // Fill each parameter's pre-allocated buffer: sample-accurate automation
+    // when present, otherwise a constant fill of the current value.
+    for (const auto& param_name : param_value_map_.Keys()) {
+      const auto param_handler = param_handler_map_.at(param_name);
+      AudioFloatArray* param_values = param_value_map_.at(param_name);
+      if (param_handler->HasSampleAccurateValues()) {
+        param_handler->CalculateSampleAccurateValues(
+            param_values->Data(), frames_to_process);
+      } else {
+        std::fill(param_values->Data(),
+                  param_values->Data() + frames_to_process,
+                  param_handler->Value());
+      }
+    }
+
+    // Run the render code and check the state of processor. Finish the
+    // processor if needed.
+    if (!processor_->Process(&input_buses, &output_buses, &param_value_map_) ||
+        processor_->hasErrorOccured()) {
+      FinishProcessorOnRenderThread();
+    }
+  } else {
+    // The initialization of handler or the associated processor might not be
+    // ready yet or it is in the error state. If so, zero out the connected
+    // output.
+    for (unsigned i = 0; i < NumberOfOutputs(); ++i) {
+      Output(i).Bus()->Zero();
+    }
+  }
+}
+
+// Propagates a changed input channel count to the single output in the
+// 1-in/1-out case; otherwise channel counts stay as configured.
+void AudioWorkletHandler::CheckNumberOfChannelsForInput(AudioNodeInput* input) {
+  DCHECK(Context()->IsAudioThread());
+  DCHECK(Context()->IsGraphOwner());
+  DCHECK(input);
+
+  // Dynamic channel count only works when the node has 1 input and 1 output.
+  // Otherwise the channel count(s) should not be dynamically changed.
+  if (NumberOfInputs() == 1 && NumberOfOutputs() == 1) {
+    DCHECK_EQ(input, &this->Input(0));
+    unsigned number_of_input_channels = Input(0).NumberOfChannels();
+    if (number_of_input_channels != Output(0).NumberOfChannels()) {
+      // This will propagate the channel count to any nodes connected further
+      // downstream in the graph.
+      Output(0).SetNumberOfChannels(number_of_input_channels);
+    }
+  }
+
+  AudioHandler::CheckNumberOfChannelsForInput(input);
+}
+
+// Returns the current tail time; it stays at its initial value (infinity)
+// until FinishProcessorOnRenderThread() sets it to zero.
+double AudioWorkletHandler::TailTime() const {
+  DCHECK(Context()->IsAudioThread());
+  return tail_time_;
+}
+
+void AudioWorkletHandler::SetProcessorOnRenderThread(
+    AudioWorkletProcessor* processor) {
+  // TODO(hongchan): unify the thread ID check. The thread ID for this call
+  // is different from |Context()->IsAudioThread()|.
+  DCHECK(!IsMainThread());
+
+  // |processor| can be nullptr when the invocation of user-supplied constructor
+  // fails. That failure fires at the node's 'onprocessorerror' event handler.
+  if (processor) {
+    processor_ = processor;
+  } else {
+    // Report the construction failure back on the main thread.
+    PostCrossThreadTask(
+        *main_thread_task_runner_, FROM_HERE,
+        CrossThreadBind(&AudioWorkletHandler::NotifyProcessorError,
+                        WrapRefCounted(this),
+                        AudioWorkletProcessorErrorState::kConstructionError));
+  }
+}
+
+void AudioWorkletHandler::FinishProcessorOnRenderThread() {
+  DCHECK(Context()->IsAudioThread());
+
+  // If the user-supplied code threw an exception during the process() call
+  // above it is no longer runnable; report the error on the main thread.
+  AudioWorkletProcessorErrorState error_state = processor_->GetErrorState();
+  if (error_state == AudioWorkletProcessorErrorState::kProcessError) {
+    PostCrossThreadTask(
+        *main_thread_task_runner_, FROM_HERE,
+        CrossThreadBind(&AudioWorkletHandler::NotifyProcessorError,
+                        WrapRefCounted(this),
+                        error_state));
+  }
+
+  // TODO(hongchan): After this point, The handler has no more pending activity
+  // and ready for GC.
+  Context()->NotifySourceNodeFinishedProcessing(this);
+  processor_.Clear();
+  tail_time_ = 0;
+}
+
+// Main-thread handler for processor errors. |error_state| is currently
+// unused: every error kind results in the same 'processorerror' event.
+void AudioWorkletHandler::NotifyProcessorError(
+    AudioWorkletProcessorErrorState error_state) {
+  DCHECK(IsMainThread());
+  // Bail out if the context or the node has already gone away.
+  if (!Context() || !Context()->GetExecutionContext() || !GetNode())
+    return;
+
+  static_cast<AudioWorkletNode*>(GetNode())->FireProcessorError();
+}
+
+// ----------------------------------------------------------------
+
+// Builds the AudioParam objects (and their handlers) described by
+// |param_info_list|, applies any |options.parameterData| initial values, and
+// creates the backing AudioWorkletHandler.
+AudioWorkletNode::AudioWorkletNode(
+    BaseAudioContext& context,
+    const String& name,
+    const AudioWorkletNodeOptions& options,
+    const Vector<CrossThreadAudioParamInfo> param_info_list,
+    MessagePort* node_port)
+    : AudioNode(context),
+      node_port_(node_port) {
+  HeapHashMap<String, Member<AudioParam>> audio_param_map;
+  HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map;
+  for (const auto& param_info : param_info_list) {
+    // IsolatedCopy: the name string originated on the worklet thread.
+    String param_name = param_info.Name().IsolatedCopy();
+    AudioParam* audio_param =
+        AudioParam::Create(context, kParamTypeAudioWorklet,
+                           "AudioWorklet(\"" + name + "\")." + param_name,
+                           param_info.DefaultValue(), param_info.MinValue(),
+                           param_info.MaxValue());
+    audio_param_map.Set(param_name, audio_param);
+    param_handler_map.Set(param_name, WrapRefCounted(&audio_param->Handler()));
+
+    // User-provided initial values override the descriptor defaults.
+    if (options.hasParameterData()) {
+      for (const auto& key_value_pair : options.parameterData()) {
+        if (key_value_pair.first == param_name)
+          audio_param->setValue(key_value_pair.second);
+      }
+    }
+  }
+  parameter_map_ = new AudioParamMap(audio_param_map);
+
+  SetHandler(AudioWorkletHandler::Create(*this,
+                                         context.sampleRate(),
+                                         name,
+                                         param_handler_map,
+                                         options));
+}
+
+// Validates |options| against the context/worklet state, creates the node,
+// and kicks off asynchronous creation of its AudioWorkletProcessor on the
+// worklet thread. Returns nullptr (with an exception thrown) on any
+// validation failure.
+AudioWorkletNode* AudioWorkletNode::Create(
+    ScriptState* script_state,
+    BaseAudioContext* context,
+    const String& name,
+    const AudioWorkletNodeOptions& options,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (context->IsContextClosed()) {
+    context->ThrowExceptionForClosedState(exception_state);
+    return nullptr;
+  }
+
+  if (options.numberOfInputs() == 0 && options.numberOfOutputs() == 0) {
+    exception_state.ThrowDOMException(
+        kNotSupportedError,
+        "AudioWorkletNode cannot be created: Number of inputs and number of "
+        "outputs cannot be both zero.");
+    return nullptr;
+  }
+
+  // If given, |outputChannelCount| must have one entry per output and each
+  // entry must be in [1, MaxNumberOfChannels].
+  if (options.hasOutputChannelCount()) {
+    if (options.numberOfOutputs() != options.outputChannelCount().size()) {
+      exception_state.ThrowDOMException(
+          kIndexSizeError,
+          "AudioWorkletNode cannot be created: Length of specified "
+          "'outputChannelCount' (" +
+              String::Number(options.outputChannelCount().size()) +
+              ") does not match the given number of outputs (" +
+              String::Number(options.numberOfOutputs()) + ").");
+      return nullptr;
+    }
+
+    for (const auto& channel_count : options.outputChannelCount()) {
+      if (channel_count < 1 ||
+          channel_count > BaseAudioContext::MaxNumberOfChannels()) {
+        exception_state.ThrowDOMException(
+            kNotSupportedError,
+            ExceptionMessages::IndexOutsideRange<unsigned long>(
+                "channel count", channel_count,
+                1,
+                ExceptionMessages::kInclusiveBound,
+                BaseAudioContext::MaxNumberOfChannels(),
+                ExceptionMessages::kInclusiveBound));
+        return nullptr;
+      }
+    }
+  }
+
+  if (!context->audioWorklet()->IsReady()) {
+    exception_state.ThrowDOMException(
+        kInvalidStateError,
+        "AudioWorkletNode cannot be created: AudioWorklet does not have a "
+        "valid AudioWorkletGlobalScope. Load a script via "
+        "audioWorklet.addModule() first.");
+    return nullptr;
+  }
+
+  if (!context->audioWorklet()->IsProcessorRegistered(name)) {
+    exception_state.ThrowDOMException(
+        kInvalidStateError,
+        "AudioWorkletNode cannot be created: The node name '" + name +
+            "' is not defined in AudioWorkletGlobalScope.");
+    return nullptr;
+  }
+
+  // port1 stays with the node; port2 is handed to the processor so the two
+  // can exchange messages across threads.
+  MessageChannel* channel =
+      MessageChannel::Create(context->GetExecutionContext());
+  MessagePortChannel processor_port_channel = channel->port2()->Disentangle();
+
+  AudioWorkletNode* node =
+      new AudioWorkletNode(*context, name, options,
+          context->audioWorklet()->GetParamInfoListForProcessor(name),
+          channel->port1());
+
+  // NOTE(review): |node| comes straight from |new| above, so this branch
+  // appears unreachable — presumably defensive; confirm before relying on it.
+  if (!node) {
+    exception_state.ThrowDOMException(
+        kInvalidStateError,
+        "AudioWorkletNode cannot be created.");
+    return nullptr;
+  }
+
+  node->HandleChannelOptions(options, exception_state);
+
+  // context keeps reference as a source node.
+  context->NotifySourceNodeStartedProcessing(node);
+
+  v8::Isolate* isolate = script_state->GetIsolate();
+  SerializedScriptValue::SerializeOptions serialize_options;
+  serialize_options.for_storage = SerializedScriptValue::kNotForStorage;
+
+  // The node options must be serialized since they are passed to and consumed
+  // by a worklet thread.
+  scoped_refptr<SerializedScriptValue> serialized_node_options =
+      SerializedScriptValue::Serialize(
+          isolate,
+          ToV8(options, script_state->GetContext()->Global(), isolate),
+          serialize_options,
+          exception_state);
+
+  // |serialized_node_options| can be nullptr if the option dictionary is not
+  // valid.
+  if (!serialized_node_options) {
+    serialized_node_options = SerializedScriptValue::NullValue();
+  }
+  DCHECK(serialized_node_options);
+
+  // This is non-blocking async call. |node| still can be returned to user
+  // before the scheduled async task is completed.
+  context->audioWorklet()->CreateProcessor(&node->GetWorkletHandler(),
+                                           std::move(processor_port_channel),
+                                           std::move(serialized_node_options));
+
+  return node;
+}
+
+// Keeps the JS wrapper alive as long as the owning context is not closed.
+bool AudioWorkletNode::HasPendingActivity() const {
+  return !context()->IsContextClosed();
+}
+
+// IDL attribute: the map of AudioParams built from the processor descriptors.
+AudioParamMap* AudioWorkletNode::parameters() const {
+  return parameter_map_;
+}
+
+// IDL attribute: the node-side end of the node/processor message channel.
+MessagePort* AudioWorkletNode::port() const {
+  return node_port_;
+}
+
+// Dispatches the 'processorerror' event; invoked by the handler on the main
+// thread when processor construction or process() fails.
+void AudioWorkletNode::FireProcessorError() {
+  DispatchEvent(Event::Create(EventTypeNames::processorerror));
+}
+
+// The handler is always an AudioWorkletHandler (set in the constructor), so
+// the static_cast is safe.
+AudioWorkletHandler& AudioWorkletNode::GetWorkletHandler() const {
+  return static_cast<AudioWorkletHandler&>(Handler());
+}
+
+void AudioWorkletNode::Trace(blink::Visitor* visitor) {
+  // Trace the GC'ed members, then delegate to the base class.
+  visitor->Trace(parameter_map_);
+  visitor->Trace(node_port_);
+  AudioNode::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h
new file mode 100644
index 00000000000..2919c5962e4
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.h
@@ -0,0 +1,129 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_map.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_node_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class AudioNodeInput;
+class AudioWorkletProcessor;
+class BaseAudioContext;
+class CrossThreadAudioParamInfo;
+class ExceptionState;
+class MessagePort;
+class ScriptState;
+
+// AudioWorkletNode is a user-facing interface of custom audio processor in
+// Web Audio API. The integration of WebAudio renderer is done via
+// AudioWorkletHandler and the actual audio processing runs on
+// AudioWorkletProcessor.
+//
+// [Main Scope] | [AudioWorkletGlobalScope]
+// AudioWorkletNode <-> AudioWorkletHandler <==|==> AudioWorkletProcessor
+// (JS interface) (Renderer access) | (V8 audio processing)
+
+class AudioWorkletHandler final : public AudioHandler {
+ public:
+  static scoped_refptr<AudioWorkletHandler> Create(
+      AudioNode&,
+      float sample_rate,
+      String name,
+      HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
+      const AudioWorkletNodeOptions&);
+
+  ~AudioWorkletHandler() override;
+
+  // Called from render thread.
+  void Process(size_t frames_to_process) override;
+
+  void CheckNumberOfChannelsForInput(AudioNodeInput*) override;
+
+  double TailTime() const override;
+  double LatencyTime() const override { return 0; }
+
+  // The processor name this node was created with.
+  String Name() const { return name_; }
+
+  // Sets |AudioWorkletProcessor| and changes the state of the processor.
+  // MUST be called from the render thread.
+  void SetProcessorOnRenderThread(AudioWorkletProcessor*);
+
+  // Finish |AudioWorkletProcessor| and set the tail time to zero, when
+  // the user-supplied |process()| method returns false.
+  void FinishProcessorOnRenderThread();
+
+  void NotifyProcessorError(AudioWorkletProcessorErrorState);
+
+ private:
+  AudioWorkletHandler(
+      AudioNode&,
+      float sample_rate,
+      String name,
+      HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map,
+      const AudioWorkletNodeOptions&);
+
+  String name_;
+
+  // Infinite until the processor finishes; then set to zero.
+  double tail_time_ = std::numeric_limits<double>::infinity();
+
+  // MUST be set/used by render thread.
+  CrossThreadPersistent<AudioWorkletProcessor> processor_;
+
+  HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map_;
+  HashMap<String, std::unique_ptr<AudioFloatArray>> param_value_map_;
+
+  // TODO: Adjust this if needed based on the result of the process
+  // method or the value of |tail_time_|.
+  // NOTE(review): not marked |override| — presumably intended to override a
+  // base-class method; confirm.
+  bool RequiresTailProcessing() const { return true; }
+
+  scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
+};
+
+class AudioWorkletNode final : public AudioNode,
+                               public ActiveScriptWrappable<AudioWorkletNode> {
+  DEFINE_WRAPPERTYPEINFO();
+  USING_GARBAGE_COLLECTED_MIXIN(AudioWorkletNode);
+
+ public:
+  static AudioWorkletNode* Create(ScriptState*,
+                                  BaseAudioContext*,
+                                  const String& name,
+                                  const AudioWorkletNodeOptions&,
+                                  ExceptionState&);
+
+  AudioWorkletHandler& GetWorkletHandler() const;
+
+  // ActiveScriptWrappable
+  bool HasPendingActivity() const final;
+
+  // IDL
+  AudioParamMap* parameters() const;
+  MessagePort* port() const;
+  DEFINE_ATTRIBUTE_EVENT_LISTENER(processorerror);
+
+  // Dispatches the 'processorerror' event on this node.
+  void FireProcessorError();
+
+  virtual void Trace(blink::Visitor*);
+
+ private:
+  AudioWorkletNode(BaseAudioContext&,
+                   const String& name,
+                   const AudioWorkletNodeOptions&,
+                   const Vector<CrossThreadAudioParamInfo>,
+                   MessagePort* node_port);
+
+  // AudioParams exposed via |parameters()|.
+  Member<AudioParamMap> parameter_map_;
+  // Node-side end of the node/processor message channel.
+  Member<MessagePort> node_port_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.idl
new file mode 100644
index 00000000000..4772e75dc74
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node.idl
@@ -0,0 +1,17 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audioworkletnode
+
+[
+    ActiveScriptWrappable,
+    Constructor(BaseAudioContext context, DOMString name, optional AudioWorkletNodeOptions options),
+    ConstructorCallWith=ScriptState,
+    MeasureAs=AudioWorkletNodeConstructor,
+    RaisesException=Constructor
+] interface AudioWorkletNode : AudioNode {
+    readonly attribute AudioParamMap parameters;
+    readonly attribute MessagePort port;
+    // Fired when the processor constructor or its process() method fails.
+    attribute EventHandler onprocessorerror;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node_options.idl
new file mode 100644
index 00000000000..5741f313f2a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_node_options.idl
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See: https://webaudio.github.io/web-audio-api/#dictdef-audioworkletnodeoptions
+dictionary AudioWorkletNodeOptions : AudioNodeOptions {
+    unsigned long numberOfInputs = 1;
+    unsigned long numberOfOutputs = 1;
+    // When omitted, every output defaults to one channel (mono).
+    sequence<unsigned long> outputChannelCount;
+    // Initial values for named AudioParams, overriding descriptor defaults.
+    record<DOMString, double> parameterData;
+    object processorOptions;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc
new file mode 100644
index 00000000000..ab8a269344d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.cc
@@ -0,0 +1,63 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h"
+
+#include "third_party/blink/renderer/core/workers/threaded_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/core/workers/worker_thread.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+
+namespace blink {
+
+// Stores the context sample rate so it can be pushed into the global scope
+// once that scope is created.
+AudioWorkletObjectProxy::AudioWorkletObjectProxy(
+    AudioWorkletMessagingProxy* messaging_proxy_weak_ptr,
+    ParentExecutionContextTaskRunners* parent_execution_context_task_runners,
+    float context_sample_rate)
+    : ThreadedWorkletObjectProxy(
+          static_cast<ThreadedWorkletMessagingProxy*>(messaging_proxy_weak_ptr),
+          parent_execution_context_task_runners),
+      context_sample_rate_(context_sample_rate) {}
+
+// Caches the newly created global scope and propagates the context's sample
+// rate into it.
+void AudioWorkletObjectProxy::DidCreateWorkerGlobalScope(
+    WorkerOrWorkletGlobalScope* global_scope) {
+  global_scope_ = ToAudioWorkletGlobalScope(global_scope);
+  global_scope_->SetSampleRate(context_sample_rate_);
+}
+
+void AudioWorkletObjectProxy::DidEvaluateModuleScript(bool success) {
+  DCHECK(global_scope_);
+
+  // Nothing to synchronize if evaluation failed or the script registered no
+  // processor definitions.
+  if (!success || global_scope_->NumberOfRegisteredDefinitions() == 0)
+    return;
+
+  std::unique_ptr<Vector<CrossThreadAudioWorkletProcessorInfo>>
+      processor_info_list =
+          global_scope_->WorkletProcessorInfoListForSynchronization();
+
+  if (processor_info_list->size() == 0)
+    return;
+
+  // Ship the newly registered processor info to the main-thread proxy.
+  PostCrossThreadTask(
+      *GetParentExecutionContextTaskRunners()->Get(TaskType::kUnthrottled),
+      FROM_HERE,
+      CrossThreadBind(
+          &AudioWorkletMessagingProxy::SynchronizeWorkletProcessorInfoList,
+          GetAudioWorkletMessagingProxyWeakPtr(),
+          WTF::Passed(std::move(processor_info_list))));
+}
+
+void AudioWorkletObjectProxy::WillDestroyWorkerGlobalScope() {
+  // Drop the cached scope pointer before the scope itself is destroyed.
+  global_scope_ = nullptr;
+}
+
+CrossThreadWeakPersistent<AudioWorkletMessagingProxy>
+AudioWorkletObjectProxy::GetAudioWorkletMessagingProxyWeakPtr() {
+  // NOTE(review): the static_cast assumes the messaging proxy is always an
+  // AudioWorkletMessagingProxy (as set up in the constructor) — confirm.
+  return static_cast<AudioWorkletMessagingProxy*>(
+      MessagingProxyWeakPtr().Get());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h
new file mode 100644
index 00000000000..5d356ceb880
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_object_proxy.h
@@ -0,0 +1,38 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_OBJECT_PROXY_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_OBJECT_PROXY_H_
+
+#include "third_party/blink/renderer/core/workers/threaded_worklet_object_proxy.h"
+
+namespace blink {
+
+class AudioWorkletGlobalScope;
+class AudioWorkletMessagingProxy;
+
+// Worklet-thread-side proxy: observes global scope lifecycle events and
+// relays processor registration info back to AudioWorkletMessagingProxy.
+class AudioWorkletObjectProxy final
+    : public ThreadedWorkletObjectProxy {
+ public:
+  AudioWorkletObjectProxy(AudioWorkletMessagingProxy*,
+                          ParentExecutionContextTaskRunners*,
+                          float context_sample_rate);
+
+  // Implements WorkerReportingProxy.
+  void DidCreateWorkerGlobalScope(WorkerOrWorkletGlobalScope*) override;
+  void DidEvaluateModuleScript(bool success) override;
+  void WillDestroyWorkerGlobalScope() override;
+
+ private:
+  CrossThreadWeakPersistent<AudioWorkletMessagingProxy>
+      GetAudioWorkletMessagingProxyWeakPtr();
+
+  CrossThreadPersistent<AudioWorkletGlobalScope> global_scope_;
+
+  // Sample rate of the owning BaseAudioContext; pushed into the global scope.
+  float context_sample_rate_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_OBJECT_PROXY_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc
new file mode 100644
index 00000000000..a197e21a6b8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.cc
@@ -0,0 +1,68 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h"
+
+#include "third_party/blink/renderer/core/messaging/message_port.h"
+#include "third_party/blink/renderer/core/workers/worker_global_scope.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+
+namespace blink {
+
+// Runs on the worklet thread. Pulls the stashed creation parameters (name and
+// message port channel) from the global scope and builds the processor.
+AudioWorkletProcessor* AudioWorkletProcessor::Create(
+    ExecutionContext* context) {
+  AudioWorkletGlobalScope* global_scope = ToAudioWorkletGlobalScope(context);
+  DCHECK(global_scope);
+  DCHECK(global_scope->IsContextThread());
+
+  // Get the stored initialization parameter from the global scope.
+  ProcessorCreationParams* params = global_scope->GetProcessorCreationParams();
+  DCHECK(params);
+
+  MessagePort* port = MessagePort::Create(*global_scope);
+  port->Entangle(std::move(params->PortChannel()));
+  return new AudioWorkletProcessor(global_scope, params->Name(), port);
+}
+
+// Trivial member initialization; all setup happens in Create().
+AudioWorkletProcessor::AudioWorkletProcessor(
+    AudioWorkletGlobalScope* global_scope,
+    const String& name,
+    MessagePort* port)
+    : global_scope_(global_scope), processor_port_(port), name_(name) {}
+
+// Delegates one render quantum to the global scope, which runs the
+// user-defined process() for this processor instance.
+bool AudioWorkletProcessor::Process(
+    Vector<AudioBus*>* input_buses,
+    Vector<AudioBus*>* output_buses,
+    HashMap<String, std::unique_ptr<AudioFloatArray>>* param_value_map) {
+  DCHECK(global_scope_->IsContextThread());
+  DCHECK(!hasErrorOccured());
+  return global_scope_->Process(this, input_buses, output_buses,
+                                param_value_map);
+}
+
+// Records an error state; once set, hasErrorOccured() reports true.
+void AudioWorkletProcessor::SetErrorState(
+    AudioWorkletProcessorErrorState error_state) {
+  error_state_ = error_state;
+}
+
+// Returns the currently recorded error state.
+AudioWorkletProcessorErrorState AudioWorkletProcessor::GetErrorState() const {
+  return error_state_;
+}
+
+// True once any non-kNoError state has been recorded via SetErrorState().
+bool AudioWorkletProcessor::hasErrorOccured() const {
+  return error_state_ != AudioWorkletProcessorErrorState::kNoError;
+}
+
+// The processor-side end of the node/processor message channel.
+MessagePort* AudioWorkletProcessor::port() const {
+  return processor_port_.Get();
+}
+
+void AudioWorkletProcessor::Trace(blink::Visitor* visitor) {
+  // Trace GC'ed members, then delegate to ScriptWrappable.
+  visitor->Trace(global_scope_);
+  visitor->Trace(processor_port_);
+  ScriptWrappable::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h
new file mode 100644
index 00000000000..dcfc56af1ad
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_H_
+
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h"
+#include "third_party/blink/renderer/platform/audio/audio_array.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+#include "v8/include/v8.h"
+
+namespace blink {
+
+class AudioBus;
+class AudioWorkletGlobalScope;
+class AudioWorkletProcessorDefinition;
+class MessagePort;
+class ExecutionContext;
+
+// AudioWorkletProcessor class represents the active instance created from
+// AudioWorkletProcessorDefinition. |AudioWorkletNodeHandler| invokes
+// process() method in this object upon graph rendering.
+//
+// This is constructed and destroyed on a worker thread, and all methods also
+// must be called on the worker thread.
+class MODULES_EXPORT AudioWorkletProcessor : public ScriptWrappable {
+  DEFINE_WRAPPERTYPEINFO();
+
+ public:
+  // This static factory should be called after an instance of
+  // |AudioWorkletNode| gets created by user-supplied JS code in the main
+  // thread. This factory must not be called by user in
+  // |AudioWorkletGlobalScope|.
+  static AudioWorkletProcessor* Create(ExecutionContext*);
+
+  ~AudioWorkletProcessor() = default;
+
+  // |AudioWorkletHandler| invokes this method to process audio.
+  bool Process(
+      Vector<AudioBus*>* input_buses,
+      Vector<AudioBus*>* output_buses,
+      HashMap<String, std::unique_ptr<AudioFloatArray>>* param_value_map);
+
+  // The processor name under which this instance was registered.
+  const String& Name() const { return name_; }
+
+  // Error bookkeeping; once a state other than kNoError is set, Process()
+  // must no longer be invoked (enforced by a DCHECK in the implementation).
+  void SetErrorState(AudioWorkletProcessorErrorState);
+  AudioWorkletProcessorErrorState GetErrorState() const;
+  // NOTE(review): misspells "occurred"; kept as-is to avoid touching callers.
+  bool hasErrorOccured() const;
+
+  // IDL
+  MessagePort* port() const;
+
+  void Trace(blink::Visitor*);
+
+ private:
+  AudioWorkletProcessor(AudioWorkletGlobalScope*,
+                        const String& name,
+                        MessagePort*);
+
+  Member<AudioWorkletGlobalScope> global_scope_;
+  Member<MessagePort> processor_port_;
+
+  const String name_;
+
+  // Defaults to kNoError; updated only via SetErrorState().
+  AudioWorkletProcessorErrorState error_state_ =
+      AudioWorkletProcessorErrorState::kNoError;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.idl b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.idl
new file mode 100644
index 00000000000..9853c972979
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor.idl
@@ -0,0 +1,13 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#audioworkletprocessor
+
+// Exposed only inside the AudioWorklet global scope; the constructor is
+// invoked with the worklet's ExecutionContext (see
+// AudioWorkletProcessor::Create, which must not be called by user script).
+[
+    Constructor,
+    ConstructorCallWith=ExecutionContext,
+    Exposed=AudioWorklet
+] interface AudioWorkletProcessor {
+    readonly attribute MessagePort port;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc
new file mode 100644
index 00000000000..4712b7e2207
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h"
+
+namespace blink {
+
+// Builds a definition from the given V8 constructor object and process
+// function. Must be called on the worklet (non-main) thread.
+AudioWorkletProcessorDefinition* AudioWorkletProcessorDefinition::Create(
+    v8::Isolate* isolate,
+    const String& name,
+    v8::Local<v8::Object> constructor,
+    v8::Local<v8::Function> process) {
+  DCHECK(!IsMainThread());
+  return new AudioWorkletProcessorDefinition(isolate, name, constructor,
+                                             process);
+}
+
+// The V8 handles are stored as traced persistent references so they survive
+// past the current handle scope (see TraceWrappers()).
+AudioWorkletProcessorDefinition::AudioWorkletProcessorDefinition(
+    v8::Isolate* isolate,
+    const String& name,
+    v8::Local<v8::Object> constructor,
+    v8::Local<v8::Function> process)
+    : name_(name),
+      constructor_(isolate, constructor),
+      process_(isolate, process) {}
+
+AudioWorkletProcessorDefinition::~AudioWorkletProcessorDefinition() = default;
+
+// Materializes a local handle to the stored constructor in |isolate|.
+// Worklet thread only.
+v8::Local<v8::Object> AudioWorkletProcessorDefinition::ConstructorLocal(
+    v8::Isolate* isolate) {
+  DCHECK(!IsMainThread());
+  return constructor_.NewLocal(isolate);
+}
+
+// Materializes a local handle to the stored process function. Worklet thread
+// only.
+v8::Local<v8::Function> AudioWorkletProcessorDefinition::ProcessLocal(
+    v8::Isolate* isolate) {
+  DCHECK(!IsMainThread());
+  return process_.NewLocal(isolate);
+}
+
+// Replaces the stored descriptor list wholesale with |descriptors|.
+void AudioWorkletProcessorDefinition::SetAudioParamDescriptors(
+    const HeapVector<AudioParamDescriptor>& descriptors) {
+  audio_param_descriptors_ = descriptors;
+}
+
+// Returns the names of all stored parameter descriptors, in storage order.
+// NOTE(review): the const-qualified by-value return type prevents the caller
+// from moving the result; dropping the top-level const (here and in the
+// header declaration together) would be preferable.
+const Vector<String>
+    AudioWorkletProcessorDefinition::GetAudioParamDescriptorNames() const {
+  Vector<String> names;
+  for (const auto& descriptor : audio_param_descriptors_) {
+    names.push_back(descriptor.name());
+  }
+  return names;
+}
+
+// Returns the descriptor whose name matches |key|, or nullptr if none is
+// stored. The returned pointer is only valid until the descriptor list is
+// replaced via SetAudioParamDescriptors().
+const AudioParamDescriptor*
+    AudioWorkletProcessorDefinition::GetAudioParamDescriptor(
+        const String& key) const {
+  // A linear scan suffices; the descriptor list is expected to be small.
+  for (const auto& descriptor : audio_param_descriptors_) {
+    if (descriptor.name() == key)
+      return &descriptor;
+  }
+  return nullptr;
+}
+
+// Wrapper tracing: keeps the stored V8 constructor and process function
+// alive across garbage collections.
+void AudioWorkletProcessorDefinition::TraceWrappers(
+    const ScriptWrappableVisitor* visitor) const {
+  visitor->TraceWrappers(constructor_.Cast<v8::Value>());
+  visitor->TraceWrappers(process_.Cast<v8::Value>());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h
new file mode 100644
index 00000000000..c4e18331632
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h
@@ -0,0 +1,77 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_DEFINITION_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_DEFINITION_H_
+
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+#include "v8/include/v8.h"
+
+namespace blink {
+
+// Represents a JavaScript class definition registered in the
+// AudioWorkletGlobalScope. After the registration, a definition class contains
+// the V8 representation of class components (constructor, process callback,
+// prototypes and parameter descriptors).
+//
+// This is constructed and destroyed on a worker thread, and all methods also
+// must be called on the worker thread.
+class MODULES_EXPORT AudioWorkletProcessorDefinition final
+    : public GarbageCollectedFinalized<AudioWorkletProcessorDefinition>,
+      public TraceWrapperBase {
+ public:
+  static AudioWorkletProcessorDefinition* Create(
+      v8::Isolate*,
+      const String& name,
+      v8::Local<v8::Object> constructor,
+      v8::Local<v8::Function> process);
+
+  virtual ~AudioWorkletProcessorDefinition();
+
+  const String& GetName() const { return name_; }
+  v8::Local<v8::Object> ConstructorLocal(v8::Isolate*);
+  v8::Local<v8::Function> ProcessLocal(v8::Isolate*);
+  void SetAudioParamDescriptors(const HeapVector<AudioParamDescriptor>&);
+  const Vector<String> GetAudioParamDescriptorNames() const;
+  const AudioParamDescriptor* GetAudioParamDescriptor(const String& key) const;
+
+  // Flag for data synchronization of definition between
+  // AudioWorkletMessagingProxy and AudioWorkletGlobalScope.
+  bool IsSynchronized() const { return is_synchronized_; }
+  void MarkAsSynchronized() { is_synchronized_ = true; }
+
+  // Traces the Oilpan-managed descriptor list.
+  void Trace(blink::Visitor* visitor) {
+    visitor->Trace(audio_param_descriptors_);
+  }
+  void TraceWrappers(const ScriptWrappableVisitor*) const override;
+  const char* NameInHeapSnapshot() const override {
+    return "AudioWorkletProcessorDefinition";
+  }
+
+ private:
+  AudioWorkletProcessorDefinition(
+      v8::Isolate*,
+      const String& name,
+      v8::Local<v8::Object> constructor,
+      v8::Local<v8::Function> process);
+
+  const String name_;
+  bool is_synchronized_ = false;
+
+  // The definition is per global scope. The active instance of
+  // |AudioWorkletProcessor| should be passed into these to perform the JS
+  // function calls.
+  TraceWrapperV8Reference<v8::Object> constructor_;
+  TraceWrapperV8Reference<v8::Function> process_;
+
+  HeapVector<AudioParamDescriptor> audio_param_descriptors_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_DEFINITION_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h
new file mode 100644
index 00000000000..a4c0afba170
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_processor_error_state.h
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_ERROR_STATE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_PROCESSOR_ERROR_STATE_H_
+
+namespace blink {
+
+// Describes whether, and where, an error occurred in an
+// AudioWorkletProcessor object.
+enum class AudioWorkletProcessorErrorState : unsigned {
+  // Neither the constructor nor the process method in the processor has
+  // thrown any exception.
+  kNoError = 0,
+
+  // An exception was thrown while constructing the processor.
+  kConstructionError = 1,
+
+  // An exception was thrown from the process method.
+  kProcessError = 2,
+};
+
+} // namespace blink
+
+#endif
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.cc
new file mode 100644
index 00000000000..312cb3aa988
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h"
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
+#include "third_party/blink/renderer/core/workers/worker_backing_thread.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/waitable_event.h"
+#include "third_party/blink/renderer/platform/web_thread_supporting_gc.h"
+#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+
+namespace blink {
+
+template class WorkletThreadHolder<AudioWorkletThread>;
+
+// Shared backing WebThread, created lazily by the first AudioWorkletThread
+// and destroyed when the last one goes away (tracked by s_ref_count_).
+WebThread* AudioWorkletThread::s_backing_thread_ = nullptr;
+
+// Number of live AudioWorkletThread objects; main thread only.
+unsigned AudioWorkletThread::s_ref_count_ = 0;
+
+std::unique_ptr<AudioWorkletThread> AudioWorkletThread::Create(
+    ThreadableLoadingContext* loading_context,
+    WorkerReportingProxy& worker_reporting_proxy) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("audio-worklet"),
+               "AudioWorkletThread::create");
+  return base::WrapUnique(
+      new AudioWorkletThread(loading_context, worker_reporting_proxy));
+}
+
+AudioWorkletThread::AudioWorkletThread(
+    ThreadableLoadingContext* loading_context,
+    WorkerReportingProxy& worker_reporting_proxy)
+    : WorkerThread(loading_context, worker_reporting_proxy) {
+  DCHECK(IsMainThread());
+  // The first instance brings up the shared backing thread.
+  if (++s_ref_count_ == 1)
+    EnsureSharedBackingThread();
+}
+
+AudioWorkletThread::~AudioWorkletThread() {
+  DCHECK(IsMainThread());
+  // The last instance tears the shared backing thread down.
+  if (--s_ref_count_ == 0)
+    ClearSharedBackingThread();
+}
+
+WorkerBackingThread& AudioWorkletThread::GetWorkerBackingThread() {
+  return *WorkletThreadHolder<AudioWorkletThread>::GetInstance()->GetThread();
+}
+
+namespace {
+
+// Runs on the worklet thread: performs a full garbage collection and then
+// signals |done_event| so the waiting main thread can resume. File-local, so
+// give it internal linkage.
+void CollectAllGarbageOnAudioWorkletThread(WaitableEvent* done_event) {
+  blink::ThreadState::Current()->CollectAllGarbage();
+  done_event->Signal();
+}
+
+}  // namespace
+
+// Posts a full-GC task to the worklet backing thread and blocks the main
+// thread until it completes. No-op if no worklet thread exists.
+void AudioWorkletThread::CollectAllGarbage() {
+  DCHECK(IsMainThread());
+  WorkletThreadHolder<AudioWorkletThread>* worklet_thread_holder =
+      WorkletThreadHolder<AudioWorkletThread>::GetInstance();
+  if (!worklet_thread_holder)
+    return;
+  // Create the event only after the early-return check above.
+  WaitableEvent done_event;
+  worklet_thread_holder->GetThread()->BackingThread().PostTask(
+      FROM_HERE, CrossThreadBind(&CollectAllGarbageOnAudioWorkletThread,
+                                 CrossThreadUnretained(&done_event)));
+  done_event.Wait();
+}
+
+void AudioWorkletThread::EnsureSharedBackingThread() {
+  DCHECK(IsMainThread());
+  // Lazily create the platform audio thread. Ownership is managed manually
+  // via the raw s_backing_thread_ pointer and released in
+  // ClearSharedBackingThread().
+  if (!s_backing_thread_)
+    s_backing_thread_ = Platform::Current()->CreateWebAudioThread().release();
+  WorkletThreadHolder<AudioWorkletThread>::EnsureInstance(s_backing_thread_);
+}
+
+void AudioWorkletThread::ClearSharedBackingThread() {
+  DCHECK(IsMainThread());
+  DCHECK(s_backing_thread_);
+  // Must only run once the last AudioWorkletThread has been destroyed.
+  DCHECK_EQ(s_ref_count_, 0u);
+  WorkletThreadHolder<AudioWorkletThread>::ClearInstance();
+  delete s_backing_thread_;
+  s_backing_thread_ = nullptr;
+}
+
+WebThread* AudioWorkletThread::GetSharedBackingThread() {
+  DCHECK(IsMainThread());
+  // Assumes EnsureSharedBackingThread() has already run; GetInstance() is
+  // dereferenced unconditionally.
+  WorkletThreadHolder<AudioWorkletThread>* instance =
+      WorkletThreadHolder<AudioWorkletThread>::GetInstance();
+  return &(instance->GetThread()->BackingThread().PlatformThread());
+}
+
+// NOTE(review): duplicates the thread-creation logic of
+// EnsureSharedBackingThread() and, unlike it, performs no IsMainThread()
+// check; consider sharing one implementation.
+void AudioWorkletThread::CreateSharedBackingThreadForTest() {
+  if (!s_backing_thread_)
+    s_backing_thread_ = Platform::Current()->CreateWebAudioThread().release();
+  WorkletThreadHolder<AudioWorkletThread>::CreateForTest(s_backing_thread_);
+}
+
+// Called on the worklet thread during startup to build the global scope.
+WorkerOrWorkletGlobalScope* AudioWorkletThread::CreateWorkerGlobalScope(
+    std::unique_ptr<GlobalScopeCreationParams> creation_params) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("audio-worklet"),
+               "AudioWorkletThread::createWorkerGlobalScope");
+  return AudioWorkletGlobalScope::Create(std::move(creation_params),
+                                         GetIsolate(), this);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h
new file mode 100644
index 00000000000..0277ad51102
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h
@@ -0,0 +1,70 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_THREAD_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_THREAD_H_
+
+#include <memory>
+#include "third_party/blink/renderer/core/workers/worker_thread.h"
+#include "third_party/blink/renderer/core/workers/worklet_thread_holder.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+
+namespace blink {
+
+class WebThread;
+class WorkerReportingProxy;
+
+// AudioWorkletThread is a per-frame singleton object that represents the
+// backing thread for the processing of AudioWorkletNode/AudioWorkletProcessor.
+// It is supposed to run an instance of V8 isolate. The life cycle of this
+// object is managed by the reference counting of the static backing thread.
+
+class MODULES_EXPORT AudioWorkletThread final : public WorkerThread {
+ public:
+  static std::unique_ptr<AudioWorkletThread> Create(ThreadableLoadingContext*,
+                                                    WorkerReportingProxy&);
+  ~AudioWorkletThread() override;
+
+  WorkerBackingThread& GetWorkerBackingThread() override;
+
+  // The backing thread is cleared by ClearSharedBackingThread() instead.
+  void ClearWorkerBackingThread() override {}
+
+  // This may block the main thread.
+  static void CollectAllGarbage();
+
+  static void EnsureSharedBackingThread();
+  static void ClearSharedBackingThread();
+
+  static void CreateSharedBackingThreadForTest();
+
+  // This can only be called after EnsureSharedBackingThread() is performed.
+  // Currently AudioWorkletThread owns only one thread and it is shared by all
+  // the customers.
+  static WebThread* GetSharedBackingThread();
+
+ private:
+  AudioWorkletThread(ThreadableLoadingContext*, WorkerReportingProxy&);
+
+  WorkerOrWorkletGlobalScope* CreateWorkerGlobalScope(
+      std::unique_ptr<GlobalScopeCreationParams>) final;
+
+  // The backing thread is shared; this instance never owns it.
+  bool IsOwningBackingThread() const override { return false; }
+
+  WebThreadType GetThreadType() const override {
+    return WebThreadType::kAudioWorkletThread;
+  }
+
+  // This raw pointer gets assigned in EnsureSharedBackingThread() and manually
+  // released by ClearSharedBackingThread().
+  static WebThread* s_backing_thread_;
+
+  // This is only accessed by the main thread. Incremented by the constructor,
+  // and decremented by the destructor.
+  static unsigned s_ref_count_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_AUDIO_WORKLET_THREAD_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc
new file mode 100644
index 00000000000..31ddb87c439
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/audio_worklet_thread_test.cc
@@ -0,0 +1,198 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h"
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/public/platform/web_url_request.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_module.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_source_code.h"
+#include "third_party/blink/renderer/bindings/core/v8/source_location.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_cache_options.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_gc_controller.h"
+#include "third_party/blink/renderer/bindings/core/v8/worker_or_worklet_script_controller.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/core/origin_trials/origin_trial_context.h"
+#include "third_party/blink/renderer/core/testing/page_test_base.h"
+#include "third_party/blink/renderer/core/workers/global_scope_creation_params.h"
+#include "third_party/blink/renderer/core/workers/worker_backing_thread.h"
+#include "third_party/blink/renderer/core/workers/worker_inspector_proxy.h"
+#include "third_party/blink/renderer/core/workers/worker_or_worklet_global_scope.h"
+#include "third_party/blink/renderer/core/workers/worker_reporting_proxy.h"
+#include "third_party/blink/renderer/core/workers/worklet_module_responses_map.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/loader/fetch/access_control_status.h"
+#include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h"
+#include "third_party/blink/renderer/platform/testing/testing_platform_support.h"
+#include "third_party/blink/renderer/platform/testing/unit_test_helpers.h"
+#include "third_party/blink/renderer/platform/waitable_event.h"
+#include "third_party/blink/renderer/platform/web_thread_supporting_gc.h"
+#include "third_party/blink/renderer/platform/wtf/text/text_position.h"
+
+namespace blink {
+
+// Fixture that brings up the shared audio-worklet backing thread for tests
+// and provides helpers to start worklet threads and run script on them.
+class AudioWorkletThreadTest : public PageTestBase {
+ public:
+  void SetUp() override {
+    AudioWorkletThread::CreateSharedBackingThreadForTest();
+    PageTestBase::SetUp(IntSize());
+    Document* document = &GetDocument();
+    // Worklets require a secure context; use an https URL and origin.
+    document->SetURL(KURL("https://example.com/"));
+    document->UpdateSecurityOrigin(SecurityOrigin::Create(document->Url()));
+    reporting_proxy_ = std::make_unique<WorkerReportingProxy>();
+  }
+
+  // Creates and starts a worklet thread using the test document's settings.
+  std::unique_ptr<AudioWorkletThread> CreateAudioWorkletThread() {
+    std::unique_ptr<AudioWorkletThread> thread =
+        AudioWorkletThread::Create(nullptr, *reporting_proxy_);
+    Document* document = &GetDocument();
+    thread->Start(
+        std::make_unique<GlobalScopeCreationParams>(
+            document->Url(), document->UserAgent(),
+            nullptr /* content_security_policy_parsed_headers */,
+            document->GetReferrerPolicy(), document->GetSecurityOrigin(),
+            document->IsSecureContext(), nullptr /* worker_clients */,
+            document->AddressSpace(),
+            OriginTrialContext::GetTokens(document).get(),
+            base::UnguessableToken::Create(), nullptr /* worker_settings */,
+            kV8CacheOptionsDefault,
+            new WorkletModuleResponsesMap(document->Fetcher())),
+        WTF::nullopt, WorkerInspectorProxy::PauseOnWorkerStart::kDontPause,
+        ParentExecutionContextTaskRunners::Create());
+    return thread;
+  }
+
+  // Attempts to run some simple script for |thread|; blocks until it ran.
+  void CheckWorkletCanExecuteScript(WorkerThread* thread) {
+    WaitableEvent wait_event;
+    thread->GetWorkerBackingThread().BackingThread().PostTask(
+        FROM_HERE,
+        CrossThreadBind(&AudioWorkletThreadTest::ExecuteScriptInWorklet,
+                        CrossThreadUnretained(this),
+                        CrossThreadUnretained(thread),
+                        CrossThreadUnretained(&wait_event)));
+    wait_event.Wait();
+  }
+
+ private:
+  // Runs on the worklet thread: compiles, instantiates, and evaluates a
+  // trivial module, then signals completion.
+  void ExecuteScriptInWorklet(WorkerThread* thread, WaitableEvent* wait_event) {
+    ScriptState* script_state =
+        thread->GlobalScope()->ScriptController()->GetScriptState();
+    EXPECT_TRUE(script_state);
+    ScriptState::Scope scope(script_state);
+    KURL js_url("https://example.com/worklet.js");
+    ScriptModule module = ScriptModule::Compile(
+        script_state->GetIsolate(), "var counter = 0; ++counter;", js_url,
+        js_url, ScriptFetchOptions(), kSharableCrossOrigin,
+        TextPosition::MinimumPosition(), ASSERT_NO_EXCEPTION);
+    EXPECT_FALSE(module.IsNull());
+    ScriptValue exception = module.Instantiate(script_state);
+    EXPECT_TRUE(exception.IsEmpty());
+    ScriptValue value = module.Evaluate(script_state);
+    EXPECT_TRUE(value.IsEmpty());
+    wait_event->Signal();
+  }
+
+  std::unique_ptr<WorkerReportingProxy> reporting_proxy_;
+};
+
+// Tests basic creation, script execution, and termination of one worklet.
+TEST_F(AudioWorkletThreadTest, Basic) {
+  std::unique_ptr<AudioWorkletThread> worklet = CreateAudioWorkletThread();
+  CheckWorkletCanExecuteScript(worklet.get());
+  worklet->Terminate();
+  worklet->WaitForShutdownForTesting();
+}
+
+// Tests that the same WebThread is used for new worklets if the WebThread is
+// still alive.
+TEST_F(AudioWorkletThreadTest, CreateSecondAndTerminateFirst) {
+  // Create the first worklet and wait until it is initialized.
+  std::unique_ptr<AudioWorkletThread> first_worklet =
+      CreateAudioWorkletThread();
+  WebThreadSupportingGC* first_thread =
+      &first_worklet->GetWorkerBackingThread().BackingThread();
+  CheckWorkletCanExecuteScript(first_worklet.get());
+  v8::Isolate* first_isolate = first_worklet->GetIsolate();
+  ASSERT_TRUE(first_isolate);
+
+  // Create the second worklet and immediately destroy the first worklet.
+  std::unique_ptr<AudioWorkletThread> second_worklet =
+      CreateAudioWorkletThread();
+  // We don't use terminateAndWait here to avoid forcible termination.
+  first_worklet->Terminate();
+  first_worklet->WaitForShutdownForTesting();
+
+  // Wait until the second worklet is initialized. Verify that the second
+  // worklet is using the same thread and Isolate as the first worklet.
+  WebThreadSupportingGC* second_thread =
+      &second_worklet->GetWorkerBackingThread().BackingThread();
+  ASSERT_EQ(first_thread, second_thread);
+
+  v8::Isolate* second_isolate = second_worklet->GetIsolate();
+  ASSERT_TRUE(second_isolate);
+  EXPECT_EQ(first_isolate, second_isolate);
+
+  // Verify that the worklet can still successfully execute script.
+  CheckWorkletCanExecuteScript(second_worklet.get());
+
+  second_worklet->Terminate();
+  second_worklet->WaitForShutdownForTesting();
+}
+
+// Tests that the backing thread is reused if all existing worklets are
+// terminated before a new worklet is created, since the shared thread is
+// created for test in SetUp().
+TEST_F(AudioWorkletThreadTest, TerminateFirstAndCreateSecond) {
+  // Create the first worklet, wait until it is initialized, and terminate it.
+  std::unique_ptr<AudioWorkletThread> worklet = CreateAudioWorkletThread();
+  WebThreadSupportingGC* first_thread =
+      &worklet->GetWorkerBackingThread().BackingThread();
+  CheckWorkletCanExecuteScript(worklet.get());
+
+  // We don't use terminateAndWait here to avoid forcible termination.
+  worklet->Terminate();
+  worklet->WaitForShutdownForTesting();
+
+  // Create the second worklet. The backing thread is same.
+  worklet = CreateAudioWorkletThread();
+  WebThreadSupportingGC* second_thread =
+      &worklet->GetWorkerBackingThread().BackingThread();
+  EXPECT_EQ(first_thread, second_thread);
+  CheckWorkletCanExecuteScript(worklet.get());
+
+  worklet->Terminate();
+  worklet->WaitForShutdownForTesting();
+}
+
+// Tests that v8::Isolate and WebThread are correctly set-up if a worklet is
+// created while another is terminating.
+TEST_F(AudioWorkletThreadTest, CreatingSecondDuringTerminationOfFirst) {
+  std::unique_ptr<AudioWorkletThread> first_worklet =
+      CreateAudioWorkletThread();
+  CheckWorkletCanExecuteScript(first_worklet.get());
+  v8::Isolate* first_isolate = first_worklet->GetIsolate();
+  ASSERT_TRUE(first_isolate);
+
+  // Request termination of the first worklet and create the second worklet
+  // as soon as possible. We don't wait for its termination.
+  // Note: We rely on the assumption that the termination steps don't run
+  // on the worklet thread so quickly. This could be a source of flakiness.
+  first_worklet->Terminate();
+  std::unique_ptr<AudioWorkletThread> second_worklet =
+      CreateAudioWorkletThread();
+
+  v8::Isolate* second_isolate = second_worklet->GetIsolate();
+  ASSERT_TRUE(second_isolate);
+  EXPECT_EQ(first_isolate, second_isolate);
+
+  // Verify that the isolate can run some scripts correctly in the second
+  // worklet.
+  CheckWorkletCanExecuteScript(second_worklet.get());
+  second_worklet->Terminate();
+  second_worklet->WaitForShutdownForTesting();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc
new file mode 100644
index 00000000000..5716546e4f7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.cc
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+
+#include "build/build_config.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/dictionary.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/dom/dom_exception.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/frame/settings.h"
+#include "third_party/blink/renderer/core/html/media/autoplay_policy.h"
+#include "third_party/blink/renderer/core/html/media/html_media_element.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/core/inspector/console_types.h"
+#include "third_party/blink/renderer/modules/mediastream/media_stream.h"
+#include "third_party/blink/renderer/modules/webaudio/analyser_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_listener.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_global_scope.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/biquad_filter_node.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_merger_node.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_splitter_node.h"
+#include "third_party/blink/renderer/modules/webaudio/constant_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/convolver_node.h"
+#include "third_party/blink/renderer/modules/webaudio/delay_node.h"
+#include "third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h"
+#include "third_party/blink/renderer/modules/webaudio/gain_node.h"
+#include "third_party/blink/renderer/modules/webaudio/iir_filter_node.h"
+#include "third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/oscillator_node.h"
+#include "third_party/blink/renderer/modules/webaudio/panner_node.h"
+#include "third_party/blink/renderer/modules/webaudio/periodic_wave.h"
+#include "third_party/blink/renderer/modules/webaudio/periodic_wave_constraints.h"
+#include "third_party/blink/renderer/modules/webaudio/script_processor_node.h"
+#include "third_party/blink/renderer/modules/webaudio/stereo_panner_node.h"
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_node.h"
+#include "third_party/blink/renderer/platform/audio/iir_filter.h"
+#include "third_party/blink/renderer/platform/bindings/script_state.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+
+namespace blink {
+
+BaseAudioContext* BaseAudioContext::Create(
+ Document& document,
+ const AudioContextOptions& context_options,
+ ExceptionState& exception_state) {
+ return AudioContext::Create(document, context_options, exception_state);
+}
+
+// Constructor for rendering to the audio hardware.
+BaseAudioContext::BaseAudioContext(Document* document,
+ enum ContextType context_type)
+ : PausableObject(document),
+ destination_node_(nullptr),
+ is_cleared_(false),
+ is_resolving_resume_promises_(false),
+ has_posted_cleanup_task_(false),
+ user_gesture_required_(false),
+ connection_count_(0),
+ deferred_task_handler_(DeferredTaskHandler::Create()),
+ context_state_(kSuspended),
+ closed_context_sample_rate_(-1),
+ periodic_wave_sine_(nullptr),
+ periodic_wave_square_(nullptr),
+ periodic_wave_sawtooth_(nullptr),
+ periodic_wave_triangle_(nullptr),
+ output_position_() {
+ switch (context_type) {
+ case kRealtimeContext:
+ switch (GetAutoplayPolicy()) {
+ case AutoplayPolicy::Type::kNoUserGestureRequired:
+ break;
+ case AutoplayPolicy::Type::kUserGestureRequired:
+ case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+ if (document->GetFrame() &&
+ document->GetFrame()->IsCrossOriginSubframe()) {
+ autoplay_status_ = AutoplayStatus::kAutoplayStatusFailed;
+ user_gesture_required_ = true;
+ }
+ break;
+ case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+ autoplay_status_ = AutoplayStatus::kAutoplayStatusFailed;
+ user_gesture_required_ = true;
+ break;
+ }
+ break;
+ case kOfflineContext:
+ // Nothing needed for offline context
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+BaseAudioContext::~BaseAudioContext() {
+ GetDeferredTaskHandler().ContextWillBeDestroyed();
+ DCHECK(!active_source_nodes_.size());
+ DCHECK(!is_resolving_resume_promises_);
+ DCHECK(!resume_resolvers_.size());
+ DCHECK(!autoplay_status_.has_value());
+}
+
+void BaseAudioContext::Initialize() {
+ if (IsDestinationInitialized())
+ return;
+
+ FFTFrame::Initialize();
+
+ audio_worklet_ = AudioWorklet::Create(this);
+
+ if (destination_node_) {
+ destination_node_->Handler().Initialize();
+ // The AudioParams in the listener need access to the destination node, so
+ // only create the listener if the destination node exists.
+ listener_ = AudioListener::Create(*this);
+ }
+}
+
+void BaseAudioContext::Clear() {
+ destination_node_.Clear();
+ // The audio rendering thread is dead. Nobody will schedule AudioHandler
+ // deletion. Let's do it ourselves.
+ GetDeferredTaskHandler().ClearHandlersToBeDeleted();
+ is_cleared_ = true;
+}
+
+void BaseAudioContext::Uninitialize() {
+ DCHECK(IsMainThread());
+
+ if (!IsDestinationInitialized())
+ return;
+
+ // This stops the audio thread and all audio rendering.
+ if (destination_node_)
+ destination_node_->Handler().Uninitialize();
+
+ // Remove tail nodes since the context is done.
+ GetDeferredTaskHandler().FinishTailProcessing();
+
+ // Get rid of the sources which may still be playing.
+ ReleaseActiveSourceNodes();
+
+ // Reject any pending resolvers before we go away.
+ RejectPendingResolvers();
+ DidClose();
+
+ DCHECK(listener_);
+ listener_->WaitForHRTFDatabaseLoaderThreadCompletion();
+
+ RecordAutoplayStatus();
+
+ Clear();
+}
+
+void BaseAudioContext::ContextDestroyed(ExecutionContext*) {
+ Uninitialize();
+}
+
+bool BaseAudioContext::HasPendingActivity() const {
+ // There's no pending activity if the audio context has been cleared.
+ return !is_cleared_;
+}
+
+AudioDestinationNode* BaseAudioContext::destination() const {
+ // Cannot be called from the audio thread because this method touches objects
+ // managed by Oilpan, and the audio thread is not managed by Oilpan.
+ DCHECK(!IsAudioThread());
+ return destination_node_;
+}
+
+void BaseAudioContext::ThrowExceptionForClosedState(
+ ExceptionState& exception_state) {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "AudioContext has been closed.");
+}
+
+AudioBuffer* BaseAudioContext::createBuffer(unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ ExceptionState& exception_state) {
+ // It's ok to call createBuffer, even if the context is closed because the
+ // AudioBuffer doesn't really "belong" to any particular context.
+
+ AudioBuffer* buffer = AudioBuffer::Create(
+ number_of_channels, number_of_frames, sample_rate, exception_state);
+
+ if (buffer) {
+ // Only record the data if the creation succeeded.
+ DEFINE_STATIC_LOCAL(SparseHistogram, audio_buffer_channels_histogram,
+ ("WebAudio.AudioBuffer.NumberOfChannels"));
+
+    // Arbitrarily limit the maximum length to 1 million frames (about 20 sec
+ // at 48kHz). The number of buckets is fairly arbitrary.
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, audio_buffer_length_histogram,
+ ("WebAudio.AudioBuffer.Length", 1, 1000000, 50));
+ // The limits are the min and max AudioBuffer sample rates currently
+ // supported. We use explicit values here instead of
+ // AudioUtilities::minAudioBufferSampleRate() and
+ // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
+ // fairly arbitrary.
+ DEFINE_STATIC_LOCAL(
+ CustomCountHistogram, audio_buffer_sample_rate_histogram,
+ ("WebAudio.AudioBuffer.SampleRate384kHz", 3000, 384000, 60));
+
+ audio_buffer_channels_histogram.Sample(number_of_channels);
+ audio_buffer_length_histogram.Count(number_of_frames);
+ audio_buffer_sample_rate_histogram.Count(sample_rate);
+
+ // Compute the ratio of the buffer rate and the context rate so we know
+ // how often the buffer needs to be resampled to match the context. For
+ // the histogram, we multiply the ratio by 100 and round to the nearest
+ // integer. If the context is closed, don't record this because we
+ // don't have a sample rate for closed context.
+ if (!IsContextClosed()) {
+      // The limits are chosen from 100*(3000/384000) = 0.78125 and
+ // 100*(384000/3000) = 12800, where 3000 and 384000 are the current
+ // min and max sample rates possible for an AudioBuffer. The number
+ // of buckets is fairly arbitrary.
+ DEFINE_STATIC_LOCAL(
+ CustomCountHistogram, audio_buffer_sample_rate_ratio_histogram,
+ ("WebAudio.AudioBuffer.SampleRateRatio384kHz", 1, 12800, 50));
+ float ratio = 100 * sample_rate / this->sampleRate();
+ audio_buffer_sample_rate_ratio_histogram.Count(
+ static_cast<int>(0.5 + ratio));
+ }
+ }
+
+ return buffer;
+}
+
+ScriptPromise BaseAudioContext::decodeAudioData(
+ ScriptState* script_state,
+ DOMArrayBuffer* audio_data,
+ ExceptionState& exception_state) {
+ return decodeAudioData(script_state, audio_data, nullptr, nullptr,
+ exception_state);
+}
+
+ScriptPromise BaseAudioContext::decodeAudioData(
+ ScriptState* script_state,
+ DOMArrayBuffer* audio_data,
+ V8DecodeSuccessCallback* success_callback,
+ ExceptionState& exception_state) {
+ return decodeAudioData(script_state, audio_data, success_callback, nullptr,
+ exception_state);
+}
+
+ScriptPromise BaseAudioContext::decodeAudioData(
+ ScriptState* script_state,
+ DOMArrayBuffer* audio_data,
+ V8DecodeSuccessCallback* success_callback,
+ V8DecodeErrorCallback* error_callback,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ DCHECK(audio_data);
+
+ ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = resolver->Promise();
+
+ float rate = IsContextClosed() ? ClosedContextSampleRate() : sampleRate();
+
+ DCHECK_GT(rate, 0);
+
+ v8::Isolate* isolate = script_state->GetIsolate();
+ WTF::ArrayBufferContents buffer_contents;
+ // Detach the audio array buffer from the main thread and start
+ // async decoding of the data.
+ if (audio_data->IsNeuterable(isolate) &&
+ audio_data->Transfer(isolate, buffer_contents)) {
+ DOMArrayBuffer* audio = DOMArrayBuffer::Create(buffer_contents);
+
+ decode_audio_resolvers_.insert(resolver);
+
+ audio_decoder_.DecodeAsync(
+ audio, rate, ToV8PersistentCallbackFunction(success_callback),
+ ToV8PersistentCallbackFunction(error_callback), resolver, this);
+ } else {
+ // If audioData is already detached (neutered) we need to reject the
+ // promise with an error.
+ DOMException* error = DOMException::Create(
+ kDataCloneError, "Cannot decode detached ArrayBuffer");
+ resolver->Reject(error);
+ if (error_callback) {
+ error_callback->InvokeAndReportException(this, error);
+ }
+ }
+
+ return promise;
+}
+
+void BaseAudioContext::HandleDecodeAudioData(
+ AudioBuffer* audio_buffer,
+ ScriptPromiseResolver* resolver,
+ V8PersistentCallbackFunction<V8DecodeSuccessCallback>* success_callback,
+ V8PersistentCallbackFunction<V8DecodeErrorCallback>* error_callback) {
+ DCHECK(IsMainThread());
+
+ if (audio_buffer) {
+ // Resolve promise successfully and run the success callback
+ resolver->Resolve(audio_buffer);
+ if (success_callback)
+ success_callback->InvokeAndReportException(this, audio_buffer);
+ } else {
+ // Reject the promise and run the error callback
+ DOMException* error =
+ DOMException::Create(kEncodingError, "Unable to decode audio data");
+ resolver->Reject(error);
+ if (error_callback)
+ error_callback->InvokeAndReportException(this, error);
+ }
+
+ // We've resolved the promise. Remove it now.
+ DCHECK(decode_audio_resolvers_.Contains(resolver));
+ decode_audio_resolvers_.erase(resolver);
+}
+
+AudioBufferSourceNode* BaseAudioContext::createBufferSource(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ AudioBufferSourceNode* node =
+ AudioBufferSourceNode::Create(*this, exception_state);
+
+ // Do not add a reference to this source node now. The reference will be added
+ // when start() is called.
+
+ return node;
+}
+
+ConstantSourceNode* BaseAudioContext::createConstantSource(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ConstantSourceNode::Create(*this, exception_state);
+}
+
+MediaElementAudioSourceNode* BaseAudioContext::createMediaElementSource(
+ HTMLMediaElement* media_element,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return MediaElementAudioSourceNode::Create(*this, *media_element,
+ exception_state);
+}
+
+MediaStreamAudioSourceNode* BaseAudioContext::createMediaStreamSource(
+ MediaStream* media_stream,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return MediaStreamAudioSourceNode::Create(*this, *media_stream,
+ exception_state);
+}
+
+MediaStreamAudioDestinationNode* BaseAudioContext::createMediaStreamDestination(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ // Set number of output channels to stereo by default.
+ return MediaStreamAudioDestinationNode::Create(*this, 2, exception_state);
+}
+
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ScriptProcessorNode::Create(*this, exception_state);
+}
+
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(
+ size_t buffer_size,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ScriptProcessorNode::Create(*this, buffer_size, exception_state);
+}
+
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(
+ size_t buffer_size,
+ size_t number_of_input_channels,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ScriptProcessorNode::Create(*this, buffer_size,
+ number_of_input_channels, exception_state);
+}
+
+ScriptProcessorNode* BaseAudioContext::createScriptProcessor(
+ size_t buffer_size,
+ size_t number_of_input_channels,
+ size_t number_of_output_channels,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ScriptProcessorNode::Create(
+ *this, buffer_size, number_of_input_channels, number_of_output_channels,
+ exception_state);
+}
+
+StereoPannerNode* BaseAudioContext::createStereoPanner(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return StereoPannerNode::Create(*this, exception_state);
+}
+
+BiquadFilterNode* BaseAudioContext::createBiquadFilter(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return BiquadFilterNode::Create(*this, exception_state);
+}
+
+WaveShaperNode* BaseAudioContext::createWaveShaper(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return WaveShaperNode::Create(*this, exception_state);
+}
+
+PannerNode* BaseAudioContext::createPanner(ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return PannerNode::Create(*this, exception_state);
+}
+
+ConvolverNode* BaseAudioContext::createConvolver(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ConvolverNode::Create(*this, exception_state);
+}
+
+DynamicsCompressorNode* BaseAudioContext::createDynamicsCompressor(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return DynamicsCompressorNode::Create(*this, exception_state);
+}
+
+AnalyserNode* BaseAudioContext::createAnalyser(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return AnalyserNode::Create(*this, exception_state);
+}
+
+GainNode* BaseAudioContext::createGain(ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return GainNode::Create(*this, exception_state);
+}
+
+DelayNode* BaseAudioContext::createDelay(ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return DelayNode::Create(*this, exception_state);
+}
+
+DelayNode* BaseAudioContext::createDelay(double max_delay_time,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return DelayNode::Create(*this, max_delay_time, exception_state);
+}
+
+ChannelSplitterNode* BaseAudioContext::createChannelSplitter(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ChannelSplitterNode::Create(*this, exception_state);
+}
+
+ChannelSplitterNode* BaseAudioContext::createChannelSplitter(
+ size_t number_of_outputs,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ChannelSplitterNode::Create(*this, number_of_outputs, exception_state);
+}
+
+ChannelMergerNode* BaseAudioContext::createChannelMerger(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ChannelMergerNode::Create(*this, exception_state);
+}
+
+ChannelMergerNode* BaseAudioContext::createChannelMerger(
+ size_t number_of_inputs,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return ChannelMergerNode::Create(*this, number_of_inputs, exception_state);
+}
+
+OscillatorNode* BaseAudioContext::createOscillator(
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return OscillatorNode::Create(*this, "sine", nullptr, exception_state);
+}
+
+PeriodicWave* BaseAudioContext::createPeriodicWave(
+ const Vector<float>& real,
+ const Vector<float>& imag,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return PeriodicWave::Create(*this, real, imag, false, exception_state);
+}
+
+PeriodicWave* BaseAudioContext::createPeriodicWave(
+ const Vector<float>& real,
+ const Vector<float>& imag,
+ const PeriodicWaveConstraints& options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ bool disable = options.disableNormalization();
+
+ return PeriodicWave::Create(*this, real, imag, disable, exception_state);
+}
+
+IIRFilterNode* BaseAudioContext::createIIRFilter(
+ Vector<double> feedforward_coef,
+ Vector<double> feedback_coef,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ return IIRFilterNode::Create(*this, feedforward_coef, feedback_coef,
+ exception_state);
+}
+
+PeriodicWave* BaseAudioContext::GetPeriodicWave(int type) {
+ switch (type) {
+ case OscillatorHandler::SINE:
+ // Initialize the table if necessary
+ if (!periodic_wave_sine_)
+ periodic_wave_sine_ = PeriodicWave::CreateSine(sampleRate());
+ return periodic_wave_sine_;
+ case OscillatorHandler::SQUARE:
+ // Initialize the table if necessary
+ if (!periodic_wave_square_)
+ periodic_wave_square_ = PeriodicWave::CreateSquare(sampleRate());
+ return periodic_wave_square_;
+ case OscillatorHandler::SAWTOOTH:
+ // Initialize the table if necessary
+ if (!periodic_wave_sawtooth_)
+ periodic_wave_sawtooth_ = PeriodicWave::CreateSawtooth(sampleRate());
+ return periodic_wave_sawtooth_;
+ case OscillatorHandler::TRIANGLE:
+ // Initialize the table if necessary
+ if (!periodic_wave_triangle_)
+ periodic_wave_triangle_ = PeriodicWave::CreateTriangle(sampleRate());
+ return periodic_wave_triangle_;
+ default:
+ NOTREACHED();
+ return nullptr;
+ }
+}
+
+void BaseAudioContext::MaybeRecordStartAttempt() {
+ if (!user_gesture_required_ || !AreAutoplayRequirementsFulfilled())
+ return;
+
+ DCHECK(!autoplay_status_.has_value() ||
+ autoplay_status_ != AutoplayStatus::kAutoplayStatusSucceeded);
+ autoplay_status_ = AutoplayStatus::kAutoplayStatusFailedWithStart;
+}
+
+String BaseAudioContext::state() const {
+ // These strings had better match the strings for AudioContextState in
+ // AudioContext.idl.
+ switch (context_state_) {
+ case kSuspended:
+ return "suspended";
+ case kRunning:
+ return "running";
+ case kClosed:
+ return "closed";
+ }
+ NOTREACHED();
+ return "";
+}
+
+void BaseAudioContext::SetContextState(AudioContextState new_state) {
+ DCHECK(IsMainThread());
+
+ // Validate the transitions. The valid transitions are Suspended->Running,
+ // Running->Suspended, and anything->Closed.
+ switch (new_state) {
+ case kSuspended:
+ DCHECK_EQ(context_state_, kRunning);
+ break;
+ case kRunning:
+ DCHECK_EQ(context_state_, kSuspended);
+ break;
+ case kClosed:
+ DCHECK_NE(context_state_, kClosed);
+ break;
+ }
+
+ if (new_state == context_state_) {
+ // DCHECKs above failed; just return.
+ return;
+ }
+
+ context_state_ = new_state;
+
+ // Notify context that state changed
+ if (GetExecutionContext()) {
+ GetExecutionContext()
+ ->GetTaskRunner(TaskType::kMediaElementEvent)
+ ->PostTask(FROM_HERE, WTF::Bind(&BaseAudioContext::NotifyStateChange,
+ WrapPersistent(this)));
+ }
+}
+
+void BaseAudioContext::NotifyStateChange() {
+ DispatchEvent(Event::Create(EventTypeNames::statechange));
+}
+
+void BaseAudioContext::NotifySourceNodeFinishedProcessing(
+ AudioHandler* handler) {
+ // This can be called from either the main thread or the audio thread. The
+ // mutex below protects access to |finished_source_handlers_| between the two
+ // threads.
+ MutexLocker lock(finished_source_handlers_mutex_);
+ finished_source_handlers_.push_back(handler);
+}
+
+Document* BaseAudioContext::GetDocument() const {
+ return ToDocument(GetExecutionContext());
+}
+
+AutoplayPolicy::Type BaseAudioContext::GetAutoplayPolicy() const {
+// The policy is different on Android compared to Desktop.
+#if defined(OS_ANDROID)
+ return AutoplayPolicy::Type::kUserGestureRequired;
+#else
+ // Force no user gesture required on desktop.
+ return AutoplayPolicy::Type::kNoUserGestureRequired;
+#endif
+}
+
+bool BaseAudioContext::AreAutoplayRequirementsFulfilled() const {
+ switch (GetAutoplayPolicy()) {
+ case AutoplayPolicy::Type::kNoUserGestureRequired:
+ return true;
+ case AutoplayPolicy::Type::kUserGestureRequired:
+ case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+ return Frame::HasTransientUserActivation(
+ GetDocument() ? GetDocument()->GetFrame() : nullptr);
+ case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+ return AutoplayPolicy::IsDocumentAllowedToPlay(*GetDocument());
+ }
+
+ NOTREACHED();
+ return false;
+}
+
+void BaseAudioContext::NotifySourceNodeStartedProcessing(AudioNode* node) {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(this);
+
+ active_source_nodes_.push_back(node);
+ node->Handler().MakeConnection();
+}
+
+void BaseAudioContext::ReleaseActiveSourceNodes() {
+ DCHECK(IsMainThread());
+ for (auto& source_node : active_source_nodes_)
+ source_node->Handler().BreakConnection();
+
+ active_source_nodes_.clear();
+}
+
+void BaseAudioContext::HandleStoppableSourceNodes() {
+ DCHECK(IsAudioThread());
+ DCHECK(IsGraphOwner());
+
+ if (finished_source_handlers_.size())
+ ScheduleMainThreadCleanup();
+}
+
+void BaseAudioContext::HandlePreRenderTasks(
+ const AudioIOPosition& output_position) {
+ DCHECK(IsAudioThread());
+
+ // At the beginning of every render quantum, try to update the internal
+ // rendering graph state (from main thread changes). It's OK if the tryLock()
+ // fails, we'll just take slightly longer to pick up the changes.
+ if (TryLock()) {
+ GetDeferredTaskHandler().HandleDeferredTasks();
+
+ ResolvePromisesForUnpause();
+
+ // Check to see if source nodes can be stopped because the end time has
+ // passed.
+ HandleStoppableSourceNodes();
+
+ // Update the dirty state of the listener.
+ listener()->UpdateState();
+
+ // Update output timestamp.
+ output_position_ = output_position;
+
+ unlock();
+ }
+}
+
+void BaseAudioContext::HandlePostRenderTasks() {
+ DCHECK(IsAudioThread());
+
+ // Must use a tryLock() here too. Don't worry, the lock will very rarely be
+ // contended and this method is called frequently. The worst that can happen
+ // is that there will be some nodes which will take slightly longer than usual
+ // to be deleted or removed from the render graph (in which case they'll
+ // render silence).
+ if (TryLock()) {
+ // Take care of AudioNode tasks where the tryLock() failed previously.
+ GetDeferredTaskHandler().BreakConnections();
+
+ GetDeferredTaskHandler().HandleDeferredTasks();
+ GetDeferredTaskHandler().RequestToDeleteHandlersOnMainThread();
+
+ unlock();
+ }
+}
+
+void BaseAudioContext::PerformCleanupOnMainThread() {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(this);
+
+ if (is_resolving_resume_promises_) {
+ for (auto& resolver : resume_resolvers_) {
+ if (context_state_ == kClosed) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError,
+ "Cannot resume a context that has been closed"));
+ } else {
+ SetContextState(kRunning);
+ resolver->Resolve();
+ }
+ }
+ resume_resolvers_.clear();
+ is_resolving_resume_promises_ = false;
+ }
+
+ if (active_source_nodes_.size()) {
+ // Find AudioBufferSourceNodes to see if we can stop playing them.
+ for (AudioNode* node : active_source_nodes_) {
+ if (node->Handler().GetNodeType() ==
+ AudioHandler::kNodeTypeAudioBufferSource) {
+ AudioBufferSourceNode* source_node =
+ static_cast<AudioBufferSourceNode*>(node);
+ source_node->GetAudioBufferSourceHandler().HandleStoppableSourceNode();
+ }
+ }
+
+ Vector<AudioHandler*> finished_handlers;
+ {
+ MutexLocker lock(finished_source_handlers_mutex_);
+ finished_source_handlers_.swap(finished_handlers);
+ }
+ // Break the connection and release active nodes that have finished
+ // playing.
+ unsigned remove_count = 0;
+ Vector<bool> removables;
+ removables.resize(active_source_nodes_.size());
+ for (AudioHandler* handler : finished_handlers) {
+ for (unsigned i = 0; i < active_source_nodes_.size(); ++i) {
+ if (handler == &active_source_nodes_[i]->Handler()) {
+ handler->BreakConnection();
+ removables[i] = true;
+ remove_count++;
+ break;
+ }
+ }
+ }
+
+ // Copy over the surviving active nodes after removal.
+ if (remove_count > 0) {
+ HeapVector<Member<AudioNode>> actives;
+ DCHECK_GE(active_source_nodes_.size(), remove_count);
+ size_t initial_capacity =
+ std::min(active_source_nodes_.size() - remove_count,
+ active_source_nodes_.size());
+ actives.ReserveInitialCapacity(initial_capacity);
+ for (unsigned i = 0; i < removables.size(); ++i) {
+ if (!removables[i])
+ actives.push_back(active_source_nodes_[i]);
+ }
+ active_source_nodes_.swap(actives);
+ }
+ }
+
+ has_posted_cleanup_task_ = false;
+}
+
+void BaseAudioContext::ScheduleMainThreadCleanup() {
+ if (has_posted_cleanup_task_)
+ return;
+ PostCrossThreadTask(
+ *Platform::Current()->MainThread()->GetTaskRunner(), FROM_HERE,
+ CrossThreadBind(&BaseAudioContext::PerformCleanupOnMainThread,
+ WrapCrossThreadPersistent(this)));
+ has_posted_cleanup_task_ = true;
+}
+
+void BaseAudioContext::ResolvePromisesForUnpause() {
+ // This runs inside the BaseAudioContext's lock when handling pre-render
+ // tasks.
+ DCHECK(IsAudioThread());
+ DCHECK(IsGraphOwner());
+
+ // Resolve any pending promises created by resume(). Only do this if we
+ // haven't already started resolving these promises. This gets called very
+ // often and it takes some time to resolve the promises in the main thread.
+ if (!is_resolving_resume_promises_ && resume_resolvers_.size() > 0) {
+ is_resolving_resume_promises_ = true;
+ ScheduleMainThreadCleanup();
+ }
+}
+
+void BaseAudioContext::RejectPendingDecodeAudioDataResolvers() {
+ // Now reject any pending decodeAudioData resolvers
+ for (auto& resolver : decode_audio_resolvers_)
+ resolver->Reject(DOMException::Create(kInvalidStateError,
+ "Audio context is going away"));
+ decode_audio_resolvers_.clear();
+}
+
+void BaseAudioContext::MaybeUnlockUserGesture() {
+ if (!user_gesture_required_ || !AreAutoplayRequirementsFulfilled())
+ return;
+
+ DCHECK(!autoplay_status_.has_value() ||
+ autoplay_status_ != AutoplayStatus::kAutoplayStatusSucceeded);
+
+ user_gesture_required_ = false;
+ autoplay_status_ = AutoplayStatus::kAutoplayStatusSucceeded;
+}
+
+bool BaseAudioContext::IsAllowedToStart() const {
+ if (!user_gesture_required_)
+ return true;
+
+ Document* document = ToDocument(GetExecutionContext());
+ DCHECK(document);
+
+ switch (GetAutoplayPolicy()) {
+ case AutoplayPolicy::Type::kNoUserGestureRequired:
+ NOTREACHED();
+ break;
+ case AutoplayPolicy::Type::kUserGestureRequired:
+ case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+ DCHECK(document->GetFrame() &&
+ document->GetFrame()->IsCrossOriginSubframe());
+ document->AddConsoleMessage(ConsoleMessage::Create(
+ kOtherMessageSource, kWarningMessageLevel,
+ "The AudioContext was not allowed to start. It must be resumed (or "
+ "created) from a user gesture event handler."));
+ break;
+ case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+ document->AddConsoleMessage(ConsoleMessage::Create(
+ kOtherMessageSource, kWarningMessageLevel,
+ "The AudioContext was not allowed to start. It must be resume (or "
+ "created) after a user gesture on the page. https://goo.gl/7K7WLu"));
+ break;
+ }
+
+ return false;
+}
+
+AudioIOPosition BaseAudioContext::OutputPosition() {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(this);
+ return output_position_;
+}
+
+void BaseAudioContext::RejectPendingResolvers() {
+ DCHECK(IsMainThread());
+
+ // Audio context is closing down so reject any resume promises that are still
+ // pending.
+
+ for (auto& resolver : resume_resolvers_) {
+ resolver->Reject(DOMException::Create(kInvalidStateError,
+ "Audio context is going away"));
+ }
+ resume_resolvers_.clear();
+ is_resolving_resume_promises_ = false;
+
+ RejectPendingDecodeAudioDataResolvers();
+}
+
+void BaseAudioContext::RecordAutoplayStatus() {
+ if (!autoplay_status_.has_value())
+ return;
+
+ DEFINE_STATIC_LOCAL(
+ EnumerationHistogram, autoplay_histogram,
+ ("WebAudio.Autoplay", AutoplayStatus::kAutoplayStatusCount));
+ DEFINE_STATIC_LOCAL(
+ EnumerationHistogram, cross_origin_autoplay_histogram,
+ ("WebAudio.Autoplay.CrossOrigin", AutoplayStatus::kAutoplayStatusCount));
+
+ autoplay_histogram.Count(autoplay_status_.value());
+
+ if (GetDocument()->GetFrame() &&
+ GetDocument()->GetFrame()->IsCrossOriginSubframe()) {
+ cross_origin_autoplay_histogram.Count(autoplay_status_.value());
+ }
+
+ autoplay_status_.reset();
+}
+
+const AtomicString& BaseAudioContext::InterfaceName() const {
+ return EventTargetNames::AudioContext;
+}
+
+ExecutionContext* BaseAudioContext::GetExecutionContext() const {
+ return PausableObject::GetExecutionContext();
+}
+
+void BaseAudioContext::StartRendering() {
+ // This is called for both online and offline contexts. The caller
+ // must set the context state appropriately. In particular, resuming
+ // a context should wait until the context has actually resumed to
+ // set the state.
+ DCHECK(IsMainThread());
+ DCHECK(destination_node_);
+ DCHECK(IsAllowedToStart());
+
+ if (context_state_ == kSuspended) {
+ destination()->GetAudioDestinationHandler().StartRendering();
+ }
+}
+
+void BaseAudioContext::Trace(blink::Visitor* visitor) {
+ visitor->Trace(destination_node_);
+ visitor->Trace(listener_);
+ visitor->Trace(active_source_nodes_);
+ visitor->Trace(resume_resolvers_);
+ visitor->Trace(decode_audio_resolvers_);
+ visitor->Trace(periodic_wave_sine_);
+ visitor->Trace(periodic_wave_square_);
+ visitor->Trace(periodic_wave_sawtooth_);
+ visitor->Trace(periodic_wave_triangle_);
+ visitor->Trace(audio_worklet_);
+ EventTargetWithInlineData::Trace(visitor);
+ PausableObject::Trace(visitor);
+}
+
+const SecurityOrigin* BaseAudioContext::GetSecurityOrigin() const {
+ if (GetExecutionContext())
+ return GetExecutionContext()->GetSecurityOrigin();
+
+ return nullptr;
+}
+
+AudioWorklet* BaseAudioContext::audioWorklet() const {
+ return audio_worklet_.Get();
+}
+
+void BaseAudioContext::NotifyWorkletIsReady() {
+ DCHECK(IsMainThread());
+ DCHECK(audioWorklet()->IsReady());
+
+ // At this point, the WorkletGlobalScope must be ready so it is safe to keep
+ // the reference to the AudioWorkletThread for the future worklet operation.
+ audio_worklet_thread_ =
+ audioWorklet()->GetMessagingProxy()->GetBackingWorkerThread();
+
+ // If the context is running or suspended, restart the destination to switch
+ // the render thread with the worklet thread. Note that restarting can happen
+ // right after the context construction.
+ if (ContextState() != kClosed) {
+ destination()->GetAudioDestinationHandler().RestartRendering();
+ }
+}
+
+void BaseAudioContext::UpdateWorkletGlobalScopeOnRenderingThread() {
+ DCHECK(!IsMainThread());
+
+ if (audio_worklet_thread_) {
+ AudioWorkletGlobalScope* global_scope =
+ ToAudioWorkletGlobalScope(audio_worklet_thread_->GlobalScope());
+ DCHECK(global_scope);
+ global_scope->SetCurrentFrame(CurrentSampleFrame());
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h
new file mode 100644
index 00000000000..19527b1bbf8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.h
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BASE_AUDIO_CONTEXT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BASE_AUDIO_CONTEXT_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/bindings/core/v8/active_script_wrappable.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise.h"
+#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_decode_error_callback.h"
+#include "third_party/blink/renderer/bindings/modules/v8/v8_decode_success_callback.h"
+#include "third_party/blink/renderer/core/dom/events/event_listener.h"
+#include "third_party/blink/renderer/core/dom/pausable_object.h"
+#include "third_party/blink/renderer/core/html/media/autoplay_policy.h"
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/event_target_modules.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/async_audio_decoder.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/deferred_task_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/iir_filter_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class AnalyserNode;
+class AudioBuffer;
+class AudioBufferSourceNode;
+class AudioContextOptions;
+class AudioListener;
+class AudioWorklet;
+class BiquadFilterNode;
+class ChannelMergerNode;
+class ChannelSplitterNode;
+class ConstantSourceNode;
+class ConvolverNode;
+class DelayNode;
+class Document;
+class DynamicsCompressorNode;
+class ExceptionState;
+class GainNode;
+class HTMLMediaElement;
+class IIRFilterNode;
+class MediaElementAudioSourceNode;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
+class OscillatorNode;
+class PannerNode;
+class PeriodicWave;
+class PeriodicWaveConstraints;
+class ScriptProcessorNode;
+class ScriptPromiseResolver;
+class ScriptState;
+class SecurityOrigin;
+class StereoPannerNode;
+class WaveShaperNode;
+class WorkerThread;
+
+// BaseAudioContext is the cornerstone of the web audio API and all AudioNodes
+// are created from it. For thread safety between the audio thread and the main
+// thread, it has a rendering graph locking mechanism.
+
+class MODULES_EXPORT BaseAudioContext
+    : public EventTargetWithInlineData,
+      public ActiveScriptWrappable<BaseAudioContext>,
+      public PausableObject {
+  USING_GARBAGE_COLLECTED_MIXIN(BaseAudioContext);
+  DEFINE_WRAPPERTYPEINFO();
+
+ public:
+  // The state of an audio context. On creation, the state is Suspended. The
+  // state is Running if audio is being processed (audio graph is being pulled
+  // for data). The state is Closed if the audio context has been closed. The
+  // valid transitions are from Suspended to either Running or Closed; Running
+  // to Suspended or Closed. Once Closed, there are no valid transitions.
+  enum AudioContextState { kSuspended, kRunning, kClosed };
+
+  // Create an AudioContext for rendering to the audio hardware.
+  static BaseAudioContext* Create(Document&,
+                                  const AudioContextOptions&,
+                                  ExceptionState&);
+
+  ~BaseAudioContext() override;
+
+  void Trace(blink::Visitor*) override;
+
+  // Is the destination node initialized and ready to handle audio?
+  bool IsDestinationInitialized() const {
+    AudioDestinationNode* dest = destination();
+    return dest ? dest->GetAudioDestinationHandler().IsInitialized() : false;
+  }
+
+  // Document notification
+  void ContextDestroyed(ExecutionContext*) final;
+  bool HasPendingActivity() const;
+
+  // Cannot be called from the audio thread.
+  AudioDestinationNode* destination() const;
+
+  size_t CurrentSampleFrame() const {
+    // TODO: What is the correct value for the current frame if the destination
+    // node has gone away? 0 is a valid frame.
+    return destination_node_ ? destination_node_->GetAudioDestinationHandler()
+                                   .CurrentSampleFrame()
+                             : 0;
+  }
+
+  double currentTime() const {
+    // TODO: What is the correct value for the current time if the destination
+    // node has gone away? 0 is a valid time.
+    return destination_node_
+               ? destination_node_->GetAudioDestinationHandler().CurrentTime()
+               : 0;
+  }
+
+  float sampleRate() const {
+    return destination_node_
+               ? destination_node_->GetAudioDestinationHandler().SampleRate()
+               : ClosedContextSampleRate();
+  }
+
+  // NOTE(review): return type is float although frames-per-buffer is an
+  // integral quantity — confirm whether this is intentional.
+  float FramesPerBuffer() const {
+    return destination_node_ ? destination_node_->GetAudioDestinationHandler()
+                                   .FramesPerBuffer()
+                             : 0;
+  }
+
+  size_t CallbackBufferSize() const {
+    return destination_node_ ? destination_node_->Handler().CallbackBufferSize()
+                             : 0;
+  }
+
+  String state() const;
+  AudioContextState ContextState() const { return context_state_; }
+  void ThrowExceptionForClosedState(ExceptionState&);
+
+  AudioBuffer* createBuffer(unsigned number_of_channels,
+                            size_t number_of_frames,
+                            float sample_rate,
+                            ExceptionState&);
+
+  // Asynchronous audio file data decoding.
+  ScriptPromise decodeAudioData(ScriptState*,
+                                DOMArrayBuffer* audio_data,
+                                V8DecodeSuccessCallback*,
+                                V8DecodeErrorCallback*,
+                                ExceptionState&);
+
+  ScriptPromise decodeAudioData(ScriptState*,
+                                DOMArrayBuffer* audio_data,
+                                ExceptionState&);
+
+  ScriptPromise decodeAudioData(ScriptState*,
+                                DOMArrayBuffer* audio_data,
+                                V8DecodeSuccessCallback*,
+                                ExceptionState&);
+
+  // Handles the promise and callbacks when |decodeAudioData| is finished
+  // decoding.
+  void HandleDecodeAudioData(
+      AudioBuffer*,
+      ScriptPromiseResolver*,
+      V8PersistentCallbackFunction<V8DecodeSuccessCallback>*,
+      V8PersistentCallbackFunction<V8DecodeErrorCallback>*);
+
+  AudioListener* listener() { return listener_; }
+
+  virtual bool HasRealtimeConstraint() = 0;
+
+  // The AudioNode create methods are called on the main thread (from
+  // JavaScript).
+  AudioBufferSourceNode* createBufferSource(ExceptionState&);
+  ConstantSourceNode* createConstantSource(ExceptionState&);
+  MediaElementAudioSourceNode* createMediaElementSource(HTMLMediaElement*,
+                                                        ExceptionState&);
+  MediaStreamAudioSourceNode* createMediaStreamSource(MediaStream*,
+                                                      ExceptionState&);
+  MediaStreamAudioDestinationNode* createMediaStreamDestination(
+      ExceptionState&);
+  GainNode* createGain(ExceptionState&);
+  BiquadFilterNode* createBiquadFilter(ExceptionState&);
+  WaveShaperNode* createWaveShaper(ExceptionState&);
+  DelayNode* createDelay(ExceptionState&);
+  DelayNode* createDelay(double max_delay_time, ExceptionState&);
+  PannerNode* createPanner(ExceptionState&);
+  ConvolverNode* createConvolver(ExceptionState&);
+  DynamicsCompressorNode* createDynamicsCompressor(ExceptionState&);
+  AnalyserNode* createAnalyser(ExceptionState&);
+  ScriptProcessorNode* createScriptProcessor(ExceptionState&);
+  ScriptProcessorNode* createScriptProcessor(size_t buffer_size,
+                                             ExceptionState&);
+  ScriptProcessorNode* createScriptProcessor(size_t buffer_size,
+                                             size_t number_of_input_channels,
+                                             ExceptionState&);
+  ScriptProcessorNode* createScriptProcessor(size_t buffer_size,
+                                             size_t number_of_input_channels,
+                                             size_t number_of_output_channels,
+                                             ExceptionState&);
+  StereoPannerNode* createStereoPanner(ExceptionState&);
+  ChannelSplitterNode* createChannelSplitter(ExceptionState&);
+  ChannelSplitterNode* createChannelSplitter(size_t number_of_outputs,
+                                             ExceptionState&);
+  ChannelMergerNode* createChannelMerger(ExceptionState&);
+  ChannelMergerNode* createChannelMerger(size_t number_of_inputs,
+                                         ExceptionState&);
+  OscillatorNode* createOscillator(ExceptionState&);
+  PeriodicWave* createPeriodicWave(const Vector<float>& real,
+                                   const Vector<float>& imag,
+                                   ExceptionState&);
+  PeriodicWave* createPeriodicWave(const Vector<float>& real,
+                                   const Vector<float>& imag,
+                                   const PeriodicWaveConstraints&,
+                                   ExceptionState&);
+
+  // Suspend
+  virtual ScriptPromise suspendContext(ScriptState*) = 0;
+
+  // Resume
+  virtual ScriptPromise resumeContext(ScriptState*) = 0;
+
+  // IIRFilter
+  IIRFilterNode* createIIRFilter(Vector<double> feedforward_coef,
+                                 Vector<double> feedback_coef,
+                                 ExceptionState&);
+
+  // When a source node has started processing and needs to be protected,
+  // this method tells the context to protect the node.
+  //
+  // The context itself keeps a reference to all source nodes. The source
+  // nodes, then reference all nodes they're connected to. In turn, these
+  // nodes reference all nodes they're connected to. All nodes are ultimately
+  // connected to the AudioDestinationNode. When the context releases a source
+  // node, it will be deactivated from the rendering graph along with all
+  // other nodes it is uniquely connected to.
+  void NotifySourceNodeStartedProcessing(AudioNode*);
+  // When a source node has no more processing to do (has finished playing),
+  // this method tells the context to release the corresponding node.
+  void NotifySourceNodeFinishedProcessing(AudioHandler*);
+
+  // Called at the start of each render quantum.
+  void HandlePreRenderTasks(const AudioIOPosition& output_position);
+
+  // Called at the end of each render quantum.
+  void HandlePostRenderTasks();
+
+  // Keeps track of the number of connections made.
+  void IncrementConnectionCount() {
+    DCHECK(IsMainThread());
+    connection_count_++;
+  }
+
+  unsigned ConnectionCount() const { return connection_count_; }
+
+  DeferredTaskHandler& GetDeferredTaskHandler() const {
+    return *deferred_task_handler_;
+  }
+  //
+  // Thread Safety and Graph Locking:
+  //
+  // The following functions call corresponding functions of
+  // DeferredTaskHandler.
+  bool IsAudioThread() const {
+    return GetDeferredTaskHandler().IsAudioThread();
+  }
+  void lock() { GetDeferredTaskHandler().lock(); }
+  bool TryLock() { return GetDeferredTaskHandler().TryLock(); }
+  void unlock() { GetDeferredTaskHandler().unlock(); }
+
+  // Returns true if this thread owns the context's lock.
+  bool IsGraphOwner() { return GetDeferredTaskHandler().IsGraphOwner(); }
+
+  using GraphAutoLocker = DeferredTaskHandler::GraphAutoLocker;
+
+  // Returns the maximum number of channels we can support.
+  static unsigned MaxNumberOfChannels() { return kMaxNumberOfChannels; }
+
+  // EventTarget
+  const AtomicString& InterfaceName() const final;
+  ExecutionContext* GetExecutionContext() const final;
+
+  DEFINE_ATTRIBUTE_EVENT_LISTENER(statechange);
+
+  // Start the AudioContext. `isAllowedToStart()` MUST be called
+  // before. This does NOT set the context state to running. The
+  // caller must set the state AFTER calling startRendering.
+  void StartRendering();
+
+  void NotifyStateChange();
+
+  // A context is considered closed if:
+  //  - closeContext() has been called.
+  //  - it has been stopped by its execution context.
+  virtual bool IsContextClosed() const { return is_cleared_; }
+
+  // Get the security origin for this audio context.
+  const SecurityOrigin* GetSecurityOrigin() const;
+
+  // Get the PeriodicWave for the specified oscillator type. The table is
+  // initialized internally if necessary.
+  PeriodicWave* GetPeriodicWave(int type);
+
+  // For metrics purpose, records when start() is called on a
+  // AudioScheduledSourceHandler or a AudioBufferSourceHandler without a user
+  // gesture while the AudioContext requires a user gesture.
+  void MaybeRecordStartAttempt();
+
+  // AudioWorklet IDL
+  AudioWorklet* audioWorklet() const;
+
+  // Callback from AudioWorklet, invoked when the associated
+  // AudioWorkletGlobalScope is created and the worklet operation is ready after
+  // the first script evaluation.
+  void NotifyWorkletIsReady();
+
+  // Update the information in AudioWorkletGlobalScope synchronously on the
+  // worklet rendering thread. Must be called from the rendering thread.
+  // Does nothing when the worklet global scope does not exist.
+  void UpdateWorkletGlobalScopeOnRenderingThread();
+
+ protected:
+  enum ContextType { kRealtimeContext, kOfflineContext };
+
+  explicit BaseAudioContext(Document*, enum ContextType);
+
+  void Initialize();
+  void Uninitialize();
+
+  void SetContextState(AudioContextState);
+
+  virtual void DidClose() {}
+
+  // Tries to handle AudioBufferSourceNodes that were started but became
+  // disconnected or were never connected. Because these never get pulled
+  // anymore, they will stay around forever. So if we can, try to stop them so
+  // they can be collected.
+  void HandleStoppableSourceNodes();
+
+  Member<AudioDestinationNode> destination_node_;
+
+  // FIXME(dominicc): Move resume_resolvers_ to AudioContext, because only
+  // it creates these Promises.
+  // Vector of promises created by resume(). It takes time to handle them, so we
+  // collect all of the promises here until they can be resolved or rejected.
+  HeapVector<Member<ScriptPromiseResolver>> resume_resolvers_;
+
+  void SetClosedContextSampleRate(float new_sample_rate) {
+    closed_context_sample_rate_ = new_sample_rate;
+  }
+  float ClosedContextSampleRate() const { return closed_context_sample_rate_; }
+
+  void RejectPendingDecodeAudioDataResolvers();
+
+  // If any, unlock user gesture requirements if a user gesture is being
+  // processed.
+  void MaybeUnlockUserGesture();
+
+  // Returns whether the AudioContext is allowed to start rendering.
+  bool IsAllowedToStart() const;
+
+  AudioIOPosition OutputPosition();
+
+ private:
+  friend class BaseAudioContextAutoplayTest;
+  friend class DISABLED_BaseAudioContextAutoplayTest;
+
+  // Do not change the order of this enum, it is used for metrics.
+  enum AutoplayStatus {
+    // The AudioContext failed to activate because of user gesture requirements.
+    kAutoplayStatusFailed = 0,
+    // Same as AutoplayStatusFailed but start() on a node was called with a user
+    // gesture.
+    kAutoplayStatusFailedWithStart = 1,
+    // The AudioContext had user gesture requirements and was able to activate
+    // with a user gesture.
+    kAutoplayStatusSucceeded = 2,
+
+    // Keep at the end.
+    kAutoplayStatusCount
+  };
+
+  bool is_cleared_;
+  void Clear();
+
+  // When the context goes away, there might still be some sources which
+  // haven't finished playing. Make sure to release them here.
+  void ReleaseActiveSourceNodes();
+
+  // Returns the Document with which the instance is associated.
+  Document* GetDocument() const;
+
+  // Returns the AutoplayPolicy currently applying to this instance.
+  AutoplayPolicy::Type GetAutoplayPolicy() const;
+
+  // Returns whether the autoplay requirements are fulfilled.
+  bool AreAutoplayRequirementsFulfilled() const;
+
+  // Listener for the PannerNodes
+  Member<AudioListener> listener_;
+
+  // Accessed by audio thread and main thread, coordinated using
+  // the associated mutex.
+  //
+  // These raw pointers are safe because AudioSourceNodes in
+  // active_source_nodes_ own them.
+  Mutex finished_source_handlers_mutex_;
+  Vector<AudioHandler*> finished_source_handlers_;
+
+  // List of source nodes. This is either accessed when the graph lock is
+  // held, or on the main thread when the audio thread has finished.
+  // Oilpan: This Vector holds connection references. We must call
+  // AudioHandler::makeConnection when we add an AudioNode to this, and must
+  // call AudioHandler::breakConnection() when we remove an AudioNode from
+  // this.
+  HeapVector<Member<AudioNode>> active_source_nodes_;
+
+  // Called by the audio thread to handle Promises for resume() and suspend(),
+  // posting a main thread task to perform the actual resolving, if needed.
+  //
+  // TODO(dominicc): Move to AudioContext because only it creates
+  // these Promises.
+  void ResolvePromisesForUnpause();
+
+  // The audio thread relies on the main thread to perform some operations
+  // over the objects that it owns and controls; |ScheduleMainThreadCleanup()|
+  // posts the task to initiate those.
+  //
+  // That is, we combine all those sub-tasks into one task action for
+  // convenience and performance, |PerformCleanupOnMainThread()|. It handles
+  // promise resolving, stopping and finishing up of audio source nodes etc.
+  // Actions that should happen, but can happen asynchronously to the
+  // audio thread making rendering progress.
+  void ScheduleMainThreadCleanup();
+  void PerformCleanupOnMainThread();
+
+  // When the context is going away, reject any pending script promise
+  // resolvers.
+  virtual void RejectPendingResolvers();
+
+  // Record the current autoplay status and clear it.
+  void RecordAutoplayStatus();
+
+  // True if we're in the process of resolving promises for resume(). Resolving
+  // can take some time and the audio context process loop is very fast, so we
+  // don't want to call resolve an excessive number of times.
+  bool is_resolving_resume_promises_;
+
+  // Set to |true| by the audio thread when it posts a main-thread task to
+  // perform delayed state sync'ing updates that needs to be done on the main
+  // thread. Cleared by the main thread task once it has run.
+  bool has_posted_cleanup_task_;
+
+  // Whether a user gesture is required to start this AudioContext.
+  bool user_gesture_required_;
+
+  unsigned connection_count_;
+
+  // Graph locking.
+  scoped_refptr<DeferredTaskHandler> deferred_task_handler_;
+
+  // The state of the BaseAudioContext.
+  AudioContextState context_state_;
+
+  AsyncAudioDecoder audio_decoder_;
+
+  // When a context is closed, the sample rate is cleared. But decodeAudioData
+  // can be called after the context has been closed and it needs the sample
+  // rate. When the context is closed, the sample rate is saved here.
+  float closed_context_sample_rate_;
+
+  // Vector of promises created by decodeAudioData. This keeps the resolvers
+  // alive until decodeAudioData finishes decoding and can tell the main thread
+  // to resolve them.
+  HeapHashSet<Member<ScriptPromiseResolver>> decode_audio_resolvers_;
+
+  // PeriodicWave's for the builtin oscillator types. These only depend on the
+  // sample rate, so they can be shared with all OscillatorNodes in the context.
+  // To conserve memory, these are lazily initialized on first use.
+  Member<PeriodicWave> periodic_wave_sine_;
+  Member<PeriodicWave> periodic_wave_square_;
+  Member<PeriodicWave> periodic_wave_sawtooth_;
+  Member<PeriodicWave> periodic_wave_triangle_;
+
+  // This is considering 32 is large enough for multiple channels audio.
+  // It is somewhat arbitrary and could be increased if necessary.
+  enum { kMaxNumberOfChannels = 32 };
+
+  Optional<AutoplayStatus> autoplay_status_;
+  AudioIOPosition output_position_;
+
+  Member<AudioWorklet> audio_worklet_;
+
+  // In order to update some information (e.g. current frame) in
+  // AudioWorkletGlobalScope *synchronously*, the context needs to keep the
+  // reference to the WorkerThread associated with the AudioWorkletGlobalScope.
+  // This cannot be nullptr once it is assigned from AudioWorkletThread until
+  // the BaseAudioContext goes away.
+  WorkerThread* audio_worklet_thread_ = nullptr;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BASE_AUDIO_CONTEXT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.idl b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.idl
new file mode 100644
index 00000000000..26ceacf40df
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context.idl
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#BaseAudioContext
+
+// Mirrors BaseAudioContext::AudioContextState on the C++ side.
+enum AudioContextState {
+    "suspended",
+    "running",
+    "closed"
+};
+
+// Callbacks for the legacy (pre-promise) decodeAudioData() signature.
+callback DecodeErrorCallback = void (DOMException error);
+callback DecodeSuccessCallback = void (AudioBuffer decodedData);
+
+[
+    ActiveScriptWrappable
+] interface BaseAudioContext : EventTarget {
+    // All rendered audio ultimately connects to destination, which represents the audio hardware.
+    readonly attribute AudioDestinationNode destination;
+
+    // All scheduled times are relative to this time in seconds.
+    readonly attribute double currentTime;
+
+    // All AudioNodes in the context run at this sample-rate (sample-frames per second).
+    readonly attribute float sampleRate;
+
+    // All panning is relative to this listener.
+    readonly attribute AudioListener listener;
+
+    // Current state of the AudioContext
+    readonly attribute AudioContextState state;
+
+    [RaisesException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
+
+    // Asynchronous audio file data decoding.
+    [RaisesException, MeasureAs=AudioContextDecodeAudioData, CallWith=ScriptState] Promise<AudioBuffer> decodeAudioData(ArrayBuffer audioData, optional DecodeSuccessCallback successCallback, optional DecodeErrorCallback errorCallback);
+
+    // Sources
+    [RaisesException, MeasureAs=AudioContextCreateBufferSource] AudioBufferSourceNode createBufferSource();
+    [RaisesException, MeasureAs=AudioContextCreateConstantSource] ConstantSourceNode createConstantSource();
+
+    // Processing nodes
+    [RaisesException, MeasureAs=AudioContextCreateGain] GainNode createGain();
+    [RaisesException, MeasureAs=AudioContextCreateDelay] DelayNode createDelay(optional double maxDelayTime);
+    [RaisesException, MeasureAs=AudioContextCreateBiquadFilter] BiquadFilterNode createBiquadFilter();
+    [RaisesException, MeasureAs=AudioContextCreateIIRFilter] IIRFilterNode createIIRFilter(sequence<double> feedForward, sequence<double> feedBack);
+    [RaisesException, MeasureAs=AudioContextCreateWaveShaper] WaveShaperNode createWaveShaper();
+    [RaisesException, MeasureAs=AudioContextCreatePannerAutomated] PannerNode createPanner();
+    [RaisesException, MeasureAs=AudioContextCreateConvolver] ConvolverNode createConvolver();
+    [RaisesException, MeasureAs=AudioContextCreateDynamicsCompressor] DynamicsCompressorNode createDynamicsCompressor();
+    [RaisesException, MeasureAs=AudioContextCreateAnalyser] AnalyserNode createAnalyser();
+    [RaisesException, MeasureAs=AudioContextCreateScriptProcessor] ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize, optional unsigned long numberOfInputChannels, optional unsigned long numberOfOutputChannels);
+    [RaisesException, MeasureAs=AudioContextCreateStereoPanner] StereoPannerNode createStereoPanner();
+    [RaisesException, MeasureAs=AudioContextCreateOscillator] OscillatorNode createOscillator();
+    [RaisesException, MeasureAs=AudioContextCreatePeriodicWave] PeriodicWave createPeriodicWave(sequence<float> real, sequence<float> imag, optional PeriodicWaveConstraints options);
+
+    // Channel splitting and merging
+    [RaisesException, MeasureAs=AudioContextCreateChannelSplitter] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs);
+    [RaisesException, MeasureAs=AudioContextCreateChannelMerger] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs);
+
+    // Resume. (Only resume() is declared here; suspend/close live on the
+    // derived interfaces.)
+    [MeasureAs=AudioContextResume, CallWith=ScriptState, ImplementedAs=resumeContext] Promise<void> resume();
+
+    // TODO(rtoy): These really belong to the AudioContext, but we need them
+    // here so we can use an offline audio context to test these.
+    [RaisesException, MeasureAs=AudioContextCreateMediaElementSource] MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
+    [RaisesException, MeasureAs=AudioContextCreateMediaStreamSource] MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+    [RaisesException, MeasureAs=AudioContextCreateMediaStreamDestination] MediaStreamAudioDestinationNode createMediaStreamDestination();
+
+    [SecureContext] readonly attribute AudioWorklet audioWorklet;
+
+    attribute EventHandler onstatechange;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context_test.cc
new file mode 100644
index 00000000000..ff72577c785
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/base_audio_context_test.cc
@@ -0,0 +1,751 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+
+#include <memory>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/public/platform/web_audio_device.h"
+#include "third_party/blink/public/platform/web_audio_latency_hint.h"
+#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/dom/user_gesture_indicator.h"
+#include "third_party/blink/renderer/core/frame/frame_owner.h"
+#include "third_party/blink/renderer/core/frame/frame_types.h"
+#include "third_party/blink/renderer/core/frame/local_frame.h"
+#include "third_party/blink/renderer/core/frame/local_frame_view.h"
+#include "third_party/blink/renderer/core/frame/settings.h"
+#include "third_party/blink/renderer/core/html/media/autoplay_policy.h"
+#include "third_party/blink/renderer/core/loader/document_loader.h"
+#include "third_party/blink/renderer/core/loader/empty_clients.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_context_options.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_thread.h"
+#include "third_party/blink/renderer/platform/testing/histogram_tester.h"
+#include "third_party/blink/renderer/platform/testing/testing_platform_support.h"
+
+namespace blink {
+
+namespace {
+
+const char* const kAutoplayMetric = "WebAudio.Autoplay";
+const char* const kAutoplayCrossOriginMetric = "WebAudio.Autoplay.CrossOrigin";
+
+// Frame client whose Parent()/Top() report a different frame, so a child
+// frame using it is treated as cross-origin relative to its parent.
+class MockCrossOriginLocalFrameClient final : public EmptyLocalFrameClient {
+ public:
+  static MockCrossOriginLocalFrameClient* Create(Frame* parent) {
+    return new MockCrossOriginLocalFrameClient(parent);
+  }
+
+  // NOTE(review): consider `override` instead of `virtual` here, matching the
+  // style used elsewhere in this patch — confirm the base declares Trace.
+  virtual void Trace(blink::Visitor* visitor) {
+    visitor->Trace(parent_);
+    EmptyLocalFrameClient::Trace(visitor);
+  }
+
+  Frame* Parent() const override { return parent_.Get(); }
+  Frame* Top() const override { return parent_.Get(); }
+
+ private:
+  explicit MockCrossOriginLocalFrameClient(Frame* parent) : parent_(parent) {}
+
+  Member<Frame> parent_;
+};
+
+// Minimal WebAudioDevice stub: Start()/Stop() are no-ops; it only echoes the
+// sample rate and buffer size it was constructed with.
+class MockWebAudioDeviceForBaseAudioContext : public WebAudioDevice {
+ public:
+  explicit MockWebAudioDeviceForBaseAudioContext(double sample_rate,
+                                                 int frames_per_buffer)
+      : sample_rate_(sample_rate), frames_per_buffer_(frames_per_buffer) {}
+  ~MockWebAudioDeviceForBaseAudioContext() override = default;
+
+  void Start() override {}
+  void Stop() override {}
+  double SampleRate() override { return sample_rate_; }
+  int FramesPerBuffer() override { return frames_per_buffer_; }
+
+ private:
+  double sample_rate_;
+  int frames_per_buffer_;
+};
+
+// Test platform that hands out the mock audio device above (44.1 kHz,
+// 128-frame buffers) and delegates thread creation to the real platform.
+class BaseAudioContextTestPlatform : public TestingPlatformSupport {
+ public:
+  std::unique_ptr<WebAudioDevice> CreateAudioDevice(
+      unsigned number_of_input_channels,
+      unsigned number_of_channels,
+      const WebAudioLatencyHint& latency_hint,
+      WebAudioDevice::RenderCallback*,
+      const WebString& device_id,
+      const WebSecurityOrigin&) override {
+    return std::make_unique<MockWebAudioDeviceForBaseAudioContext>(
+        AudioHardwareSampleRate(), AudioHardwareBufferSize());
+  }
+
+  std::unique_ptr<WebThread> CreateThread(
+      const WebThreadCreationParams& params) override {
+    // return base::WrapUnique(old_platform_->CurrentThread());
+    return old_platform_->CreateThread(params);
+  }
+
+  double AudioHardwareSampleRate() override { return 44100; }
+  size_t AudioHardwareBufferSize() override { return 128; }
+};
+
+} // anonymous namespace
+
+// Often times out on all platforms: https://crbug.com/763550.
+// Wraps TEST_P so every parameterized test below is registered DISABLED_.
+#define MAYBE_TEST_P(test_case_name, test_name) \
+  TEST_P(test_case_name, DISABLED_##test_name)
+
+// Fixture parameterized over AutoplayPolicy::Type. Builds a main document at
+// https://example.com with a cross-origin (https://cross-origin.com) child
+// frame, and exposes the context's private autoplay hooks to the tests.
+class BaseAudioContextAutoplayTest
+    : public testing::TestWithParam<AutoplayPolicy::Type> {
+ protected:
+  using AutoplayStatus = BaseAudioContext::AutoplayStatus;
+
+  void SetUp() override {
+    dummy_page_holder_ = DummyPageHolder::Create();
+    dummy_frame_owner_ = DummyFrameOwner::Create();
+    GetDocument().UpdateSecurityOrigin(
+        SecurityOrigin::Create("https", "example.com", 80));
+
+    CreateChildFrame();
+
+    // Apply the parameterized autoplay policy to both documents.
+    GetDocument().GetSettings()->SetAutoplayPolicy(GetParam());
+    ChildDocument().GetSettings()->SetAutoplayPolicy(GetParam());
+
+    histogram_tester_ = std::make_unique<HistogramTester>();
+    AudioWorkletThread::CreateSharedBackingThreadForTest();
+  }
+
+  void TearDown() override {
+    if (child_frame_)
+      child_frame_->Detach(FrameDetachType::kRemove);
+
+    AudioWorkletThread::ClearSharedBackingThread();
+  }
+
+  // Creates the cross-origin child frame using the mock frame client above.
+  void CreateChildFrame() {
+    child_frame_ = LocalFrame::Create(
+        MockCrossOriginLocalFrameClient::Create(GetDocument().GetFrame()),
+        *GetDocument().GetFrame()->GetPage(), dummy_frame_owner_.Get());
+    child_frame_->SetView(
+        LocalFrameView::Create(*child_frame_, IntSize(500, 500)));
+    child_frame_->Init();
+
+    ChildDocument().UpdateSecurityOrigin(
+        SecurityOrigin::Create("https", "cross-origin.com", 80));
+  }
+
+  Document& GetDocument() { return dummy_page_holder_->GetDocument(); }
+
+  Document& ChildDocument() { return *child_frame_->GetDocument(); }
+
+  ScriptState* GetScriptStateFrom(const Document& document) {
+    return ToScriptStateForMainWorld(document.GetFrame());
+  }
+
+  // The next two forward to BaseAudioContext's private members; this fixture
+  // is a friend of BaseAudioContext.
+  void RejectPendingResolvers(BaseAudioContext* audio_context) {
+    audio_context->RejectPendingResolvers();
+  }
+
+  void RecordAutoplayStatus(BaseAudioContext* audio_context) {
+    audio_context->RecordAutoplayStatus();
+  }
+
+  HistogramTester* GetHistogramTester() {
+    return histogram_tester_.get();
+  }
+
+ private:
+  std::unique_ptr<DummyPageHolder> dummy_page_holder_;
+  Persistent<DummyFrameOwner> dummy_frame_owner_;
+  Persistent<LocalFrame> child_frame_;
+  std::unique_ptr<HistogramTester> histogram_tester_;
+  ScopedTestingPlatformSupport<BaseAudioContextTestPlatform> platform_;
+};
+
+// Creates an AudioContext without a gesture inside a x-origin child frame.
+// Creates an AudioContext without a gesture inside a x-origin child frame.
+// Expects a kAutoplayStatusFailed sample in both histograms for every policy
+// that imposes a gesture requirement, and no samples otherwise.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CreateNoGesture_Child) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext without a gesture inside a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CreateNoGesture_Main) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  // Main-frame contexts never record the cross-origin histogram.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls resume without a gesture in an x-origin
+// child frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CallResumeNoGesture_Child) {
+  ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  // A resume() without a gesture does not change the outcome: cross-origin
+  // failures are recorded to both histograms.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls resume without a gesture in a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CallResumeNoGesture_Main) {
+  ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  // Resume with the *main* document's script state.  This test exercises the
+  // main frame, so the previous use of ChildDocument() here was inconsistent
+  // with the scope above and with every other *_Main test in this file.
+  audio_context->resumeContext(GetScriptStateFrom(GetDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  // Main-frame contexts never record the cross-origin histogram.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext with a user gesture inside an x-origin child frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CreateGesture_Child) {
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  // Creation under an active gesture succeeds; cross-origin successes are
+  // recorded to both histograms.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusSucceeded,
+          1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext with a user gesture inside a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest, AutoplayMetrics_CreateGesture_Main) {
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(GetDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  // Main-frame contexts never record the cross-origin histogram.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls resume with a user gesture inside an
+// x-origin child frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CallResumeGesture_Child) {
+  ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  // The gesture arrives only after creation, via resume().
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+
+  audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusSucceeded,
+          1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls resume with a user gesture inside a main
+// frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_CallResumeGesture_Main) {
+  ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  // The gesture arrives only after creation, via resume().
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(GetDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+
+  audio_context->resumeContext(GetScriptStateFrom(GetDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node without a gesture inside
+// an x-origin child frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartNoGesture_Child) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  audio_context->MaybeRecordStartAttempt();
+  RecordAutoplayStatus(audio_context);
+
+  // A start attempt without any gesture still fails under every
+  // gesture-requiring policy.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node without a gesture inside
+// a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartNoGesture_Main) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  audio_context->MaybeRecordStartAttempt();
+  RecordAutoplayStatus(audio_context);
+
+  // Main-frame contexts never record the cross-origin histogram.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node with a gesture inside an
+// x-origin child frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartGesture_Child) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->MaybeRecordStartAttempt();
+  RecordAutoplayStatus(audio_context);
+
+  // A node start was attempted but the context was never resumed, so the
+  // gesture-requiring policies record kAutoplayStatusFailedWithStart.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailedWithStart, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric,
+          AutoplayStatus::kAutoplayStatusFailedWithStart, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node with a gesture inside a
+// main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartGesture_Main) {
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(GetDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->MaybeRecordStartAttempt();
+  RecordAutoplayStatus(audio_context);
+
+  // Start attempted but never resumed: kDocumentUserActivationRequired
+  // records a failure-with-start; no cross-origin metric in the main frame.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailedWithStart, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node without a gesture and
+// finally allows the AudioContext to produce sound inside an x-origin child
+// frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartNoGestureThenSuccess_Child) {
+  ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  audio_context->MaybeRecordStartAttempt();
+
+  // A later gesture-backed resume() upgrades the earlier failed start to an
+  // overall success.
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusSucceeded,
+          1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node without a gesture and
+// finally allows the AudioContext to produce sound inside a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartNoGestureThenSuccess_Main) {
+  ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  audio_context->MaybeRecordStartAttempt();
+
+  // A later gesture-backed resume() upgrades the earlier failed start to an
+  // overall success.
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(GetDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->resumeContext(GetScriptStateFrom(GetDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node with a gesture and
+// finally allows the AudioContext to produce sound inside an x-origin child
+// frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartGestureThenSucces_Child) {
+  ScriptState::Scope scope(GetScriptStateFrom(ChildDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  // Both the start attempt and the resume() happen under the gesture.
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->MaybeRecordStartAttempt();
+  audio_context->resumeContext(GetScriptStateFrom(ChildDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusSucceeded,
+          1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Creates an AudioContext then calls start on a node with a gesture and
+// finally allows the AudioContext to produce sound inside a main frame.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_NodeStartGestureThenSucces_Main) {
+  ScriptState::Scope scope(GetScriptStateFrom(GetDocument()));
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+
+  // Both the start attempt and the resume() happen under the gesture.
+  std::unique_ptr<UserGestureIndicator> user_gesture_scope =
+      Frame::NotifyUserActivation(GetDocument().GetFrame(),
+                                  UserGestureToken::kNewGesture);
+  audio_context->MaybeRecordStartAttempt();
+  audio_context->resumeContext(GetScriptStateFrom(GetDocument()));
+  RejectPendingResolvers(audio_context);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Attempts to autoplay an AudioContext in an x-origin child frame when the
+// document previously received a user gesture.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_DocumentReceivedGesture_Child) {
+  // Sticky document activation only; no UserGestureIndicator is kept alive.
+  Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                              UserGestureToken::kNewGesture);
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      ChildDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  // Only kDocumentUserActivationRequired honors the sticky activation; the
+  // per-call-stack gesture policies still fail.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusFailed, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayCrossOriginMetric, AutoplayStatus::kAutoplayStatusSucceeded,
+          1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 1);
+      break;
+  }
+}
+
+// Attempts to autoplay an AudioContext in a main frame when the document
+// previously received a user gesture.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_DocumentReceivedGesture_Main) {
+  // NOTE(review): the activation is delivered to the *child* frame even
+  // though the context is created in the main document — presumably relying
+  // on user activation propagating up the frame tree; confirm intentional.
+  Frame::NotifyUserActivation(ChildDocument().GetFrame(),
+                              UserGestureToken::kNewGesture);
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Attempts to autoplay an AudioContext in a main frame when the document
+// received a user gesture before navigation.
+MAYBE_TEST_P(BaseAudioContextAutoplayTest,
+             AutoplayMetrics_DocumentReceivedGesture_BeforeNavigation) {
+  GetDocument().GetFrame()->SetDocumentHasReceivedUserGestureBeforeNavigation(
+      true);
+
+  BaseAudioContext* audio_context = BaseAudioContext::Create(
+      GetDocument(), AudioContextOptions(), ASSERT_NO_EXCEPTION);
+  RecordAutoplayStatus(audio_context);
+
+  // The pre-navigation gesture flag satisfies only the document-activation
+  // policy; the others record nothing for main-frame contexts.
+  switch (GetParam()) {
+    case AutoplayPolicy::Type::kNoUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequired:
+    case AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin:
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 0);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+    case AutoplayPolicy::Type::kDocumentUserActivationRequired:
+      GetHistogramTester()->ExpectBucketCount(
+          kAutoplayMetric, AutoplayStatus::kAutoplayStatusSucceeded, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayMetric, 1);
+      GetHistogramTester()->ExpectTotalCount(kAutoplayCrossOriginMetric, 0);
+      break;
+  }
+}
+
+// Runs every test in the suite once per supported autoplay policy.
+INSTANTIATE_TEST_CASE_P(
+    BaseAudioContextAutoplayTest,
+    BaseAudioContextAutoplayTest,
+    testing::Values(AutoplayPolicy::Type::kNoUserGestureRequired,
+                    AutoplayPolicy::Type::kUserGestureRequired,
+                    AutoplayPolicy::Type::kUserGestureRequiredForCrossOrigin,
+                    AutoplayPolicy::Type::kDocumentUserActivationRequired));
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc
new file mode 100644
index 00000000000..758de1ceb27
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.cc
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <limits.h>
+#include "third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+// Returns true if the first |frames_to_process| entries of |values| are all
+// equal to values[0], i.e. the sample-accurate automation is actually
+// constant over this render quantum.
+static bool hasConstantValues(float* values, int frames_to_process) {
+  // TODO(rtoy): Use SIMD to optimize this. This would speed up
+  // processing by a factor of 4 because we can process 4 floats at a
+  // time.
+  float value = values[0];
+
+  for (int k = 1; k < frames_to_process; ++k) {
+    if (values[k] != value) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Recomputes the filter coefficients from the processor's four AudioParams
+// (cutoff frequency, Q, gain, detune), but only when the processor has
+// flagged them dirty.
+void BiquadDSPKernel::UpdateCoefficientsIfNecessary(int frames_to_process) {
+  if (GetBiquadProcessor()->FilterCoefficientsDirty()) {
+    // Stack buffers sized for exactly one render quantum.
+    float cutoff_frequency[AudioUtilities::kRenderQuantumFrames];
+    float q[AudioUtilities::kRenderQuantumFrames];
+    float gain[AudioUtilities::kRenderQuantumFrames];
+    float detune[AudioUtilities::kRenderQuantumFrames];  // in Cents
+
+    // Guard against writing past the fixed-size buffers above.
+    SECURITY_CHECK(static_cast<unsigned>(frames_to_process) <=
+                   AudioUtilities::kRenderQuantumFrames);
+
+    if (GetBiquadProcessor()->HasSampleAccurateValues()) {
+      GetBiquadProcessor()->Parameter1().CalculateSampleAccurateValues(
+          cutoff_frequency, frames_to_process);
+      GetBiquadProcessor()->Parameter2().CalculateSampleAccurateValues(
+          q, frames_to_process);
+      GetBiquadProcessor()->Parameter3().CalculateSampleAccurateValues(
+          gain, frames_to_process);
+      GetBiquadProcessor()->Parameter4().CalculateSampleAccurateValues(
+          detune, frames_to_process);
+
+      // If all the values are actually constant for this render, we
+      // don't need to compute filter coefficients for each frame
+      // since they would be the same as the first.
+      bool isConstant =
+          hasConstantValues(cutoff_frequency, frames_to_process) &&
+          hasConstantValues(q, frames_to_process) &&
+          hasConstantValues(gain, frames_to_process) &&
+          hasConstantValues(detune, frames_to_process);
+
+      UpdateCoefficients(isConstant ? 1 : frames_to_process, cutoff_frequency,
+                         q, gain, detune);
+    } else {
+      // Non-sample-accurate: a single coefficient set for the whole quantum.
+      cutoff_frequency[0] = GetBiquadProcessor()->Parameter1().Value();
+      q[0] = GetBiquadProcessor()->Parameter2().Value();
+      gain[0] = GetBiquadProcessor()->Parameter3().Value();
+      detune[0] = GetBiquadProcessor()->Parameter4().Value();
+      UpdateCoefficients(1, cutoff_frequency, q, gain, detune);
+    }
+  }
+}
+
+// Computes |number_of_frames| coefficient sets from the given parameter
+// arrays.  number_of_frames == 1 means the parameters are treated as
+// constant for the render quantum.
+void BiquadDSPKernel::UpdateCoefficients(int number_of_frames,
+                                         const float* cutoff_frequency,
+                                         const float* q,
+                                         const float* gain,
+                                         const float* detune) {
+  // Convert from Hertz to normalized frequency 0 -> 1.
+  double nyquist = this->Nyquist();
+
+  biquad_.SetHasSampleAccurateValues(number_of_frames > 1);
+
+  for (int k = 0; k < number_of_frames; ++k) {
+    double normalized_frequency = cutoff_frequency[k] / nyquist;
+
+    // Offset frequency by detune.
+    if (detune[k]) {
+      // Detune multiplies the frequency by 2^(detune[k] / 1200).
+      normalized_frequency *= exp2(detune[k] / 1200);
+    }
+
+    // Configure the biquad with the new filter parameters for the appropriate
+    // type of filter.
+    switch (GetBiquadProcessor()->GetType()) {
+      case BiquadProcessor::kLowPass:
+        biquad_.SetLowpassParams(k, normalized_frequency, q[k]);
+        break;
+
+      case BiquadProcessor::kHighPass:
+        biquad_.SetHighpassParams(k, normalized_frequency, q[k]);
+        break;
+
+      case BiquadProcessor::kBandPass:
+        biquad_.SetBandpassParams(k, normalized_frequency, q[k]);
+        break;
+
+      case BiquadProcessor::kLowShelf:
+        biquad_.SetLowShelfParams(k, normalized_frequency, gain[k]);
+        break;
+
+      case BiquadProcessor::kHighShelf:
+        biquad_.SetHighShelfParams(k, normalized_frequency, gain[k]);
+        break;
+
+      case BiquadProcessor::kPeaking:
+        biquad_.SetPeakingParams(k, normalized_frequency, q[k], gain[k]);
+        break;
+
+      case BiquadProcessor::kNotch:
+        biquad_.SetNotchParams(k, normalized_frequency, q[k]);
+        break;
+
+      case BiquadProcessor::kAllpass:
+        biquad_.SetAllpassParams(k, normalized_frequency, q[k]);
+        break;
+    }
+  }
+
+  // Re-estimate the tail from the last coefficient set just written.
+  UpdateTailTime(number_of_frames - 1);
+}
+
+// Estimates the filter tail (seconds) from the coefficient set at
+// |coef_index| and stores it, clamped to [0, kMaxTailTime], in tail_time_.
+void BiquadDSPKernel::UpdateTailTime(int coef_index) {
+  // A reasonable upper limit for the tail time. While it's easy to
+  // create biquad filters whose tail time can be much larger than
+  // this, limit the maximum to this value so that we don't keep such
+  // nodes alive "forever".
+  // TODO: What is a reasonable upper limit?
+  const double kMaxTailTime = 30;
+
+  double sample_rate = SampleRate();
+  double tail =
+      biquad_.TailFrame(coef_index, kMaxTailTime * sample_rate) / sample_rate;
+
+  tail_time_ = clampTo(tail, 0.0, kMaxTailTime);
+}
+
+// Renders one block on the audio thread: refreshes the coefficients if the
+// parameter lock is uncontended, then filters |frames_to_process| samples
+// from |source| into |destination|.
+void BiquadDSPKernel::Process(const float* source,
+                              float* destination,
+                              size_t frames_to_process) {
+  DCHECK(source);
+  DCHECK(destination);
+  DCHECK(GetBiquadProcessor());
+
+  // Recompute filter coefficients if any of the parameters have changed.
+  // FIXME: as an optimization, implement a way that a Biquad object can simply
+  // copy its internal filter coefficients from another Biquad object. Then
+  // re-factor this code to only run for the first BiquadDSPKernel of each
+  // BiquadProcessor.
+
+  // The audio thread can't block on this lock; skip updating the coefficients
+  // for this block if necessary. We'll get them the next time around.
+  {
+    MutexTryLocker try_locker(process_lock_);
+    if (try_locker.Locked())
+      UpdateCoefficientsIfNecessary(frames_to_process);
+  }
+
+  // Filtering proceeds with whichever coefficients are current.
+  biquad_.Process(source, destination, frames_to_process);
+}
+
+// Evaluates the filter's magnitude and phase response at the |n_frequencies|
+// frequencies (in Hz) in |frequency_hz|, writing into |mag_response| and
+// |phase_response|.  Takes |process_lock_| to read a consistent parameter
+// snapshot.
+void BiquadDSPKernel::GetFrequencyResponse(int n_frequencies,
+                                           const float* frequency_hz,
+                                           float* mag_response,
+                                           float* phase_response) {
+  bool is_good =
+      n_frequencies > 0 && frequency_hz && mag_response && phase_response;
+  DCHECK(is_good);
+  if (!is_good)
+    return;
+
+  Vector<float> frequency(n_frequencies);
+
+  double nyquist = this->Nyquist();
+
+  // Convert from frequency in Hz to normalized frequency (0 -> 1),
+  // with 1 equal to the Nyquist frequency.
+  for (int k = 0; k < n_frequencies; ++k)
+    frequency[k] = frequency_hz[k] / nyquist;
+
+  float cutoff_frequency;
+  float q;
+  float gain;
+  float detune;  // in Cents
+
+  {
+    // Get a copy of the current biquad filter coefficients so we can update the
+    // biquad with these values. We need to synchronize with process() to
+    // prevent process() from updating the filter coefficients while we're
+    // trying to access them. The process will update it next time around.
+    //
+    // The BiquadDSPKernel object here (along with its Biquad object) is for
+    // querying the frequency response and is NOT the same as the one in
+    // process() which is used for performing the actual filtering. This one
+    // is created in BiquadProcessor::getFrequencyResponse for this purpose.
+    // Both, however, point to the same BiquadProcessor object.
+    //
+    // FIXME: Simplify this: crbug.com/390266
+    MutexLocker process_locker(process_lock_);
+
+    cutoff_frequency = GetBiquadProcessor()->Parameter1().Value();
+    q = GetBiquadProcessor()->Parameter2().Value();
+    gain = GetBiquadProcessor()->Parameter3().Value();
+    detune = GetBiquadProcessor()->Parameter4().Value();
+  }
+
+  // Apply the snapshot as a single (constant) coefficient set, then query.
+  UpdateCoefficients(1, &cutoff_frequency, &q, &gain, &detune);
+
+  biquad_.GetFrequencyResponse(n_frequencies, frequency.data(), mag_response,
+                               phase_response);
+}
+
+// IIR filters ring after the input stops, so the kernel always asks for
+// tail processing.
+bool BiquadDSPKernel::RequiresTailProcessing() const {
+  // Always return true even if the tail time and latency might both
+  // be zero. This is for simplicity and because TailTime() is 0
+  // basically only when the filter response H(z) = 0 or H(z) = 1. And
+  // it's ok to return true. It just means the node lives a little
+  // longer than strictly necessary.
+  return true;
+}
+
+// Tail time in seconds, as last computed by UpdateTailTime().
+double BiquadDSPKernel::TailTime() const {
+  return tail_time_;
+}
+
+// This kernel reports no processing latency.
+double BiquadDSPKernel::LatencyTime() const {
+  return 0;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h
new file mode 100644
index 00000000000..21fe6b0a34e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_DSP_KERNEL_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_DSP_KERNEL_H_
+
+#include "third_party/blink/renderer/modules/webaudio/biquad_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/biquad.h"
+
+namespace blink {
+
+class BiquadProcessor;
+
+// BiquadDSPKernel is an AudioDSPKernel and is responsible for filtering one
+// channel of a BiquadProcessor using a Biquad object.
+
+class BiquadDSPKernel final : public AudioDSPKernel {
+ public:
+ explicit BiquadDSPKernel(BiquadProcessor* processor)
+ : AudioDSPKernel(processor),
+ tail_time_(std::numeric_limits<double>::infinity()) {}
+
+ // AudioDSPKernel
+ void Process(const float* source,
+ float* dest,
+ size_t frames_to_process) override;
+ void Reset() override { biquad_.Reset(); }
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response);
+
+ bool RequiresTailProcessing() const final;
+ double TailTime() const override;
+ double LatencyTime() const override;
+
+ protected:
+ Biquad biquad_;
+ BiquadProcessor* GetBiquadProcessor() {
+ return static_cast<BiquadProcessor*>(Processor());
+ }
+
+ // To prevent audio glitches when parameters are changed,
+ // dezippering is used to slowly change the parameters.
+ void UpdateCoefficientsIfNecessary(int);
+  // Update the biquad coefficients with the given parameters
+ void UpdateCoefficients(int,
+ const float* frequency,
+ const float* q,
+ const float* gain,
+ const float* detune);
+
+ private:
+ // Compute the tail time using the BiquadFilter coefficients at
+ // index |coef_index|.
+ void UpdateTailTime(int coef_index);
+
+ // Synchronize process() with getting and setting the filter coefficients.
+ mutable Mutex process_lock_;
+
+  // The current tail time for the biquad filter.
+ double tail_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_DSP_KERNEL_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc
new file mode 100644
index 00000000000..0c3290d8090
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/biquad_filter_node.h"
+
+#include <memory>
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/biquad_filter_options.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+
+namespace blink {
+
+BiquadFilterHandler::BiquadFilterHandler(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune)
+ : AudioBasicProcessorHandler(kNodeTypeBiquadFilter,
+ node,
+ sample_rate,
+ std::make_unique<BiquadProcessor>(sample_rate,
+ 1,
+ frequency,
+ q,
+ gain,
+ detune)) {
+ // Initialize the handler so that AudioParams can be processed.
+ Initialize();
+}
+
+scoped_refptr<BiquadFilterHandler> BiquadFilterHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune) {
+ return base::AdoptRef(
+ new BiquadFilterHandler(node, sample_rate, frequency, q, gain, detune));
+}
+
+BiquadFilterNode::BiquadFilterNode(BaseAudioContext& context)
+ : AudioNode(context),
+ frequency_(AudioParam::Create(context,
+ kParamTypeBiquadFilterFrequency,
+ "BiquadFilter.frequency",
+ 350.0,
+ 0,
+ context.sampleRate() / 2)),
+ q_(AudioParam::Create(context,
+ kParamTypeBiquadFilterQ,
+ "BiquadFilter.Q",
+ 1.0)),
+ gain_(AudioParam::Create(context,
+ kParamTypeBiquadFilterGain,
+ "BiquadFilter.gain",
+ 0.0)),
+ detune_(AudioParam::Create(context,
+ kParamTypeBiquadFilterDetune,
+ "BiquadFilter.detune",
+ 0.0)) {
+ SetHandler(BiquadFilterHandler::Create(*this, context.sampleRate(),
+ frequency_->Handler(), q_->Handler(),
+ gain_->Handler(), detune_->Handler()));
+
+ setType("lowpass");
+}
+
+BiquadFilterNode* BiquadFilterNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new BiquadFilterNode(context);
+}
+
+BiquadFilterNode* BiquadFilterNode::Create(BaseAudioContext* context,
+ const BiquadFilterOptions& options,
+ ExceptionState& exception_state) {
+ BiquadFilterNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->setType(options.type());
+ node->q()->setValue(options.Q());
+ node->detune()->setValue(options.detune());
+ node->frequency()->setValue(options.frequency());
+ node->gain()->setValue(options.gain());
+
+ return node;
+}
+
+void BiquadFilterNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(frequency_);
+ visitor->Trace(q_);
+ visitor->Trace(gain_);
+ visitor->Trace(detune_);
+ AudioNode::Trace(visitor);
+}
+
+BiquadProcessor* BiquadFilterNode::GetBiquadProcessor() const {
+ return static_cast<BiquadProcessor*>(
+ static_cast<BiquadFilterHandler&>(Handler()).Processor());
+}
+
+String BiquadFilterNode::type() const {
+ switch (
+ const_cast<BiquadFilterNode*>(this)->GetBiquadProcessor()->GetType()) {
+ case BiquadProcessor::kLowPass:
+ return "lowpass";
+ case BiquadProcessor::kHighPass:
+ return "highpass";
+ case BiquadProcessor::kBandPass:
+ return "bandpass";
+ case BiquadProcessor::kLowShelf:
+ return "lowshelf";
+ case BiquadProcessor::kHighShelf:
+ return "highshelf";
+ case BiquadProcessor::kPeaking:
+ return "peaking";
+ case BiquadProcessor::kNotch:
+ return "notch";
+ case BiquadProcessor::kAllpass:
+ return "allpass";
+ default:
+ NOTREACHED();
+ return "lowpass";
+ }
+}
+
+void BiquadFilterNode::setType(const String& type) {
+ // For the Q histogram, we need to change the name of the AudioParam for the
+ // lowpass and highpass filters so we know to count the Q value when it is
+ // set. And explicitly set the value to itself so the histograms know the
+ // initial value.
+
+ if (type == "lowpass") {
+ setType(BiquadProcessor::kLowPass);
+ } else if (type == "highpass") {
+ setType(BiquadProcessor::kHighPass);
+ } else if (type == "bandpass") {
+ setType(BiquadProcessor::kBandPass);
+ } else if (type == "lowshelf") {
+ setType(BiquadProcessor::kLowShelf);
+ } else if (type == "highshelf") {
+ setType(BiquadProcessor::kHighShelf);
+ } else if (type == "peaking") {
+ setType(BiquadProcessor::kPeaking);
+ } else if (type == "notch") {
+ setType(BiquadProcessor::kNotch);
+ } else if (type == "allpass") {
+ setType(BiquadProcessor::kAllpass);
+ }
+}
+
+bool BiquadFilterNode::setType(unsigned type) {
+ if (type > BiquadProcessor::kAllpass)
+ return false;
+
+ DEFINE_STATIC_LOCAL(
+ EnumerationHistogram, filter_type_histogram,
+ ("WebAudio.BiquadFilter.Type", BiquadProcessor::kAllpass + 1));
+ filter_type_histogram.Count(type);
+
+ GetBiquadProcessor()->SetType(static_cast<BiquadProcessor::FilterType>(type));
+ return true;
+}
+
+void BiquadFilterNode::getFrequencyResponse(
+ NotShared<const DOMFloat32Array> frequency_hz,
+ NotShared<DOMFloat32Array> mag_response,
+ NotShared<DOMFloat32Array> phase_response,
+ ExceptionState& exception_state) {
+ unsigned frequency_hz_length = frequency_hz.View()->length();
+
+ if (mag_response.View()->length() != frequency_hz_length) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ ExceptionMessages::IndexOutsideRange(
+ "magResponse length", mag_response.View()->length(),
+ frequency_hz_length, ExceptionMessages::kInclusiveBound,
+ frequency_hz_length, ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ if (phase_response.View()->length() != frequency_hz_length) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ ExceptionMessages::IndexOutsideRange(
+ "phaseResponse length", phase_response.View()->length(),
+ frequency_hz_length, ExceptionMessages::kInclusiveBound,
+ frequency_hz_length, ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ GetBiquadProcessor()->GetFrequencyResponse(
+ frequency_hz_length, frequency_hz.View()->Data(),
+ mag_response.View()->Data(), phase_response.View()->Data());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h
new file mode 100644
index 00000000000..8407c7d9b70
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_FILTER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_FILTER_NODE_H_
+
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/biquad_processor.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class AudioParam;
+class BiquadFilterOptions;
+
+class BiquadFilterHandler : public AudioBasicProcessorHandler {
+ public:
+ static scoped_refptr<BiquadFilterHandler> Create(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune);
+
+ private:
+ BiquadFilterHandler(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune);
+};
+
+class BiquadFilterNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ // These must be defined as in the .idl file and must match those in the
+ // BiquadProcessor class.
+ enum {
+ LOWPASS = 0,
+ HIGHPASS = 1,
+ BANDPASS = 2,
+ LOWSHELF = 3,
+ HIGHSHELF = 4,
+ PEAKING = 5,
+ NOTCH = 6,
+ ALLPASS = 7
+ };
+
+ static BiquadFilterNode* Create(BaseAudioContext&, ExceptionState&);
+ static BiquadFilterNode* Create(BaseAudioContext*,
+ const BiquadFilterOptions&,
+ ExceptionState&);
+
+ virtual void Trace(blink::Visitor*);
+
+ String type() const;
+ void setType(const String&);
+
+ AudioParam* frequency() { return frequency_; }
+ AudioParam* q() { return q_; }
+ AudioParam* gain() { return gain_; }
+ AudioParam* detune() { return detune_; }
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void getFrequencyResponse(NotShared<const DOMFloat32Array> frequency_hz,
+ NotShared<DOMFloat32Array> mag_response,
+ NotShared<DOMFloat32Array> phase_response,
+ ExceptionState&);
+
+ private:
+ BiquadFilterNode(BaseAudioContext&);
+
+ BiquadProcessor* GetBiquadProcessor() const;
+ bool setType(unsigned); // Returns true on success.
+
+ Member<AudioParam> frequency_;
+ Member<AudioParam> q_;
+ Member<AudioParam> gain_;
+ Member<AudioParam> detune_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_FILTER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.idl
new file mode 100644
index 00000000000..63fb78f5668
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_node.idl
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#biquadfilternode
+enum BiquadFilterType {
+ "lowpass",
+ "highpass",
+ "bandpass",
+ "lowshelf",
+ "highshelf",
+ "peaking",
+ "notch",
+ "allpass"
+};
+
+[
+ Constructor(BaseAudioContext context, optional BiquadFilterOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface BiquadFilterNode : AudioNode {
+ attribute BiquadFilterType type;
+
+ readonly attribute AudioParam frequency; // in Hertz
+ readonly attribute AudioParam detune; // in Cents
+ readonly attribute AudioParam Q; // Quality factor
+ readonly attribute AudioParam gain; // in Decibels
+
+ [RaisesException] void getFrequencyResponse(Float32Array frequencyHz,
+ Float32Array magResponse,
+ Float32Array phaseResponse);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_options.idl
new file mode 100644
index 00000000000..fe29d87df93
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_filter_options.idl
@@ -0,0 +1,12 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-biquadfilteroptions
+dictionary BiquadFilterOptions : AudioNodeOptions {
+ BiquadFilterType type = "lowpass";
+ float Q = 1;
+ float detune = 0;
+ float frequency = 350;
+ float gain = 0;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc
new file mode 100644
index 00000000000..5b80b30be8b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/biquad_dsp_kernel.h"
+#include "third_party/blink/renderer/modules/webaudio/biquad_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+
+namespace blink {
+
+BiquadProcessor::BiquadProcessor(float sample_rate,
+ size_t number_of_channels,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune)
+ : AudioDSPKernelProcessor(sample_rate, number_of_channels),
+ type_(kLowPass),
+ parameter1_(&frequency),
+ parameter2_(&q),
+ parameter3_(&gain),
+ parameter4_(&detune),
+ filter_coefficients_dirty_(true),
+ has_sample_accurate_values_(false) {}
+
+BiquadProcessor::~BiquadProcessor() {
+ if (IsInitialized())
+ Uninitialize();
+}
+
+std::unique_ptr<AudioDSPKernel> BiquadProcessor::CreateKernel() {
+ return std::make_unique<BiquadDSPKernel>(this);
+}
+
+void BiquadProcessor::CheckForDirtyCoefficients() {
+ // Deal with smoothing / de-zippering. Start out assuming filter parameters
+ // are not changing.
+
+ // The BiquadDSPKernel objects rely on this value to see if they need to
+ // re-compute their internal filter coefficients.
+ filter_coefficients_dirty_ = false;
+ has_sample_accurate_values_ = false;
+
+ if (parameter1_->HasSampleAccurateValues() ||
+ parameter2_->HasSampleAccurateValues() ||
+ parameter3_->HasSampleAccurateValues() ||
+ parameter4_->HasSampleAccurateValues()) {
+ filter_coefficients_dirty_ = true;
+ has_sample_accurate_values_ = true;
+ } else {
+ if (has_just_reset_) {
+ // Snap to exact values first time after reset, then smooth for subsequent
+ // changes.
+ parameter1_->ResetSmoothedValue();
+ parameter2_->ResetSmoothedValue();
+ parameter3_->ResetSmoothedValue();
+ parameter4_->ResetSmoothedValue();
+ filter_coefficients_dirty_ = true;
+ has_just_reset_ = false;
+ } else {
+ // TODO(crbug.com/763994): With dezippering removed, we don't want to use
+ // these methods. We need to implement another way of noticing if one of
+ // the parameters has changed. We do this as an optimization because
+ // computing the filter coefficients from these parameters is fairly
+ // expensive. NB: The calls to Smooth() don't actually cause the
+ // coefficients to be dezippered. This is just a way to notice that the
+ // coefficient values have changed. |UpdateCoefficientsIfNecessary()|
+ // checks to see if the filter coefficients are dirty and sets the filter
+ // to the new value, without smoothing.
+ //
+ // Smooth all of the filter parameters. If they haven't yet converged to
+ // their target value then mark coefficients as dirty.
+ bool is_stable1 = parameter1_->Smooth();
+ bool is_stable2 = parameter2_->Smooth();
+ bool is_stable3 = parameter3_->Smooth();
+ bool is_stable4 = parameter4_->Smooth();
+ if (!(is_stable1 && is_stable2 && is_stable3 && is_stable4))
+ filter_coefficients_dirty_ = true;
+ }
+ }
+}
+
+void BiquadProcessor::Process(const AudioBus* source,
+ AudioBus* destination,
+ size_t frames_to_process) {
+ if (!IsInitialized()) {
+ destination->Zero();
+ return;
+ }
+
+ // Synchronize with possible dynamic changes to the impulse response.
+ MutexTryLocker try_locker(process_lock_);
+ if (!try_locker.Locked()) {
+ // Can't get the lock. We must be in the middle of changing something.
+ destination->Zero();
+ return;
+ }
+
+ CheckForDirtyCoefficients();
+
+ // For each channel of our input, process using the corresponding
+ // BiquadDSPKernel into the output channel.
+ for (unsigned i = 0; i < kernels_.size(); ++i)
+ kernels_[i]->Process(source->Channel(i)->Data(),
+ destination->Channel(i)->MutableData(),
+ frames_to_process);
+}
+
+void BiquadProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ float values[AudioUtilities::kRenderQuantumFrames];
+
+ parameter1_->CalculateSampleAccurateValues(values, frames_to_process);
+ parameter2_->CalculateSampleAccurateValues(values, frames_to_process);
+ parameter3_->CalculateSampleAccurateValues(values, frames_to_process);
+ parameter4_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+void BiquadProcessor::SetType(FilterType type) {
+ if (type != type_) {
+ type_ = type;
+ Reset(); // The filter state must be reset only if the type has changed.
+ }
+}
+
+void BiquadProcessor::GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response) {
+ // Compute the frequency response on a separate temporary kernel
+ // to avoid interfering with the processing running in the audio
+ // thread on the main kernels.
+
+ std::unique_ptr<BiquadDSPKernel> response_kernel =
+ std::make_unique<BiquadDSPKernel>(this);
+ response_kernel->GetFrequencyResponse(n_frequencies, frequency_hz,
+ mag_response, phase_response);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h
new file mode 100644
index 00000000000..17ea56bed34
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/biquad_processor.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_PROCESSOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_PROCESSOR_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel_processor.h"
+#include "third_party/blink/renderer/platform/audio/biquad.h"
+
+namespace blink {
+
+// BiquadProcessor is an AudioDSPKernelProcessor which uses Biquad objects to
+// implement several common filters.
+
+class BiquadProcessor final : public AudioDSPKernelProcessor {
+ public:
+  // These values are used in histograms and should not be renumbered or deleted.
+ enum FilterType {
+ kLowPass = 0,
+ kHighPass = 1,
+ kBandPass = 2,
+ kLowShelf = 3,
+ kHighShelf = 4,
+ kPeaking = 5,
+ kNotch = 6,
+ kAllpass = 7
+ };
+
+ BiquadProcessor(float sample_rate,
+ size_t number_of_channels,
+ AudioParamHandler& frequency,
+ AudioParamHandler& q,
+ AudioParamHandler& gain,
+ AudioParamHandler& detune);
+ ~BiquadProcessor() override;
+
+ std::unique_ptr<AudioDSPKernel> CreateKernel() override;
+
+ void Process(const AudioBus* source,
+ AudioBus* destination,
+ size_t frames_to_process) override;
+
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response);
+
+ void CheckForDirtyCoefficients();
+
+ bool FilterCoefficientsDirty() const { return filter_coefficients_dirty_; }
+ bool HasSampleAccurateValues() const { return has_sample_accurate_values_; }
+
+ AudioParamHandler& Parameter1() { return *parameter1_; }
+ AudioParamHandler& Parameter2() { return *parameter2_; }
+ AudioParamHandler& Parameter3() { return *parameter3_; }
+ AudioParamHandler& Parameter4() { return *parameter4_; }
+
+ FilterType GetType() const { return type_; }
+ void SetType(FilterType);
+
+ private:
+ FilterType type_;
+
+ scoped_refptr<AudioParamHandler> parameter1_;
+ scoped_refptr<AudioParamHandler> parameter2_;
+ scoped_refptr<AudioParamHandler> parameter3_;
+ scoped_refptr<AudioParamHandler> parameter4_;
+
+ // so DSP kernels know when to re-compute coefficients
+ bool filter_coefficients_dirty_;
+
+ // Set to true if any of the filter parameters are sample-accurate.
+ bool has_sample_accurate_values_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_BIQUAD_PROCESSOR_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc
new file mode 100644
index 00000000000..02a4795947d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_merger_node.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_merger_options.h"
+
+namespace blink {
+
+ChannelMergerHandler::ChannelMergerHandler(AudioNode& node,
+ float sample_rate,
+ unsigned number_of_inputs)
+ : AudioHandler(kNodeTypeChannelMerger, node, sample_rate) {
+  // These properties are fixed for the node and cannot be changed by the user.
+ channel_count_ = 1;
+ SetInternalChannelCountMode(kExplicit);
+
+ // Create the requested number of inputs.
+ for (unsigned i = 0; i < number_of_inputs; ++i)
+ AddInput();
+
+ // Create the output with the requested number of channels.
+ AddOutput(number_of_inputs);
+
+ Initialize();
+}
+
+scoped_refptr<ChannelMergerHandler> ChannelMergerHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ unsigned number_of_inputs) {
+ return base::AdoptRef(
+ new ChannelMergerHandler(node, sample_rate, number_of_inputs));
+}
+
+void ChannelMergerHandler::Process(size_t frames_to_process) {
+ AudioNodeOutput& output = this->Output(0);
+ DCHECK_EQ(frames_to_process, output.Bus()->length());
+
+ unsigned number_of_output_channels = output.NumberOfChannels();
+ DCHECK_EQ(NumberOfInputs(), number_of_output_channels);
+
+ // Merge multiple inputs into one output.
+ for (unsigned i = 0; i < number_of_output_channels; ++i) {
+ AudioNodeInput& input = this->Input(i);
+ DCHECK_EQ(input.NumberOfChannels(), 1u);
+ AudioChannel* output_channel = output.Bus()->Channel(i);
+ if (input.IsConnected()) {
+ // The mixing rules will be applied so multiple channels are down-
+ // mixed to mono (when the mixing rule is defined). Note that only
+ // the first channel will be taken for the undefined input channel
+ // layout.
+ //
+ // See:
+ // http://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing
+ AudioChannel* input_channel = input.Bus()->Channel(0);
+ output_channel->CopyFrom(input_channel);
+
+ } else {
+ // If input is unconnected, fill zeros in the channel.
+ output_channel->Zero();
+ }
+ }
+}
+
+void ChannelMergerHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // channelCount must be 1.
+ if (channel_count != 1) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "ChannelMerger: channelCount cannot be changed from 1");
+ }
+}
+
+void ChannelMergerHandler::SetChannelCountMode(
+ const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+  // channelCountMode must be 'explicit'.
+ if (mode != "explicit") {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "ChannelMerger: channelCountMode cannot be changed from 'explicit'");
+ }
+}
+
+// ----------------------------------------------------------------
+
+ChannelMergerNode::ChannelMergerNode(BaseAudioContext& context,
+ unsigned number_of_inputs)
+ : AudioNode(context) {
+ SetHandler(ChannelMergerHandler::Create(*this, context.sampleRate(),
+ number_of_inputs));
+}
+
+ChannelMergerNode* ChannelMergerNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ // The default number of inputs for the merger node is 6.
+ return Create(context, 6, exception_state);
+}
+
+ChannelMergerNode* ChannelMergerNode::Create(BaseAudioContext& context,
+ unsigned number_of_inputs,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ if (!number_of_inputs ||
+ number_of_inputs > BaseAudioContext::MaxNumberOfChannels()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange<size_t>(
+ "number of inputs", number_of_inputs, 1,
+ ExceptionMessages::kInclusiveBound,
+ BaseAudioContext::MaxNumberOfChannels(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ return new ChannelMergerNode(context, number_of_inputs);
+}
+
+ChannelMergerNode* ChannelMergerNode::Create(
+ BaseAudioContext* context,
+ const ChannelMergerOptions& options,
+ ExceptionState& exception_state) {
+ ChannelMergerNode* node =
+ Create(*context, options.numberOfInputs(), exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ return node;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h
new file mode 100644
index 00000000000..7ab089c8657
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_MERGER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_MERGER_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ChannelMergerOptions;
+
+class ChannelMergerHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<ChannelMergerHandler> Create(AudioNode&,
+ float sample_rate,
+ unsigned number_of_inputs);
+
+ void Process(size_t frames_to_process) override;
+ void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCountMode(const String&, ExceptionState&) final;
+
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ ChannelMergerHandler(AudioNode&,
+ float sample_rate,
+ unsigned number_of_inputs);
+};
+
+class ChannelMergerNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static ChannelMergerNode* Create(BaseAudioContext&, ExceptionState&);
+ static ChannelMergerNode* Create(BaseAudioContext&,
+ unsigned number_of_inputs,
+ ExceptionState&);
+ static ChannelMergerNode* Create(BaseAudioContext*,
+ const ChannelMergerOptions&,
+ ExceptionState&);
+
+ private:
+ ChannelMergerNode(BaseAudioContext&, unsigned number_of_inputs);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_MERGER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.idl
new file mode 100644
index 00000000000..4998b14bfe0
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_node.idl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#channelmergernode
+[
+ Constructor(BaseAudioContext context, optional ChannelMergerOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface ChannelMergerNode : AudioNode {
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_options.idl
new file mode 100644
index 00000000000..f7b82086f37
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_merger_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-channelmergeroptions
+dictionary ChannelMergerOptions : AudioNodeOptions {
+ unsigned long numberOfInputs = 6;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc
new file mode 100644
index 00000000000..19e0feca8fa
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_splitter_node.h"
+#include "third_party/blink/renderer/modules/webaudio/channel_splitter_options.h"
+
+namespace blink {
+
+ChannelSplitterHandler::ChannelSplitterHandler(AudioNode& node,
+ float sample_rate,
+ unsigned number_of_outputs)
+ : AudioHandler(kNodeTypeChannelSplitter, node, sample_rate) {
+ // These properties are fixed and cannot be changed by the user.
+ channel_count_ = number_of_outputs;
+ SetInternalChannelCountMode(kExplicit);
+ SetInternalChannelInterpretation(AudioBus::kDiscrete);
+ AddInput();
+
+ // Create a fixed number of outputs (able to handle the maximum number of
+ // channels fed to an input).
+ for (unsigned i = 0; i < number_of_outputs; ++i)
+ AddOutput(1);
+
+ Initialize();
+}
+
+scoped_refptr<ChannelSplitterHandler> ChannelSplitterHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ unsigned number_of_outputs) {
+ return base::AdoptRef(
+ new ChannelSplitterHandler(node, sample_rate, number_of_outputs));
+}
+
+void ChannelSplitterHandler::Process(size_t frames_to_process) {
+ AudioBus* source = Input(0).Bus();
+ DCHECK(source);
+ DCHECK_EQ(frames_to_process, source->length());
+
+ unsigned number_of_source_channels = source->NumberOfChannels();
+
+ for (unsigned i = 0; i < NumberOfOutputs(); ++i) {
+ AudioBus* destination = Output(i).Bus();
+ DCHECK(destination);
+
+ if (i < number_of_source_channels) {
+ // Split the channel out if it exists in the source.
+ // It would be nice to avoid the copy and simply pass along pointers, but
+ // this becomes extremely difficult with fanout and fanin.
+ destination->Channel(0)->CopyFrom(source->Channel(i));
+ } else if (Output(i).RenderingFanOutCount() > 0) {
+ // Only bother zeroing out the destination if it's connected to anything
+ destination->Zero();
+ }
+ }
+}
+
+void ChannelSplitterHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // channelCount cannot be changed from the number of outputs.
+ if (channel_count != NumberOfOutputs()) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "ChannelSplitter: channelCount cannot be changed from " +
+ String::Number(NumberOfOutputs()));
+ }
+}
+
+void ChannelSplitterHandler::SetChannelCountMode(
+ const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+  // channelCountMode must be 'explicit'.
+ if (mode != "explicit") {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "ChannelSplitter: channelCountMode cannot be changed from 'explicit'");
+ }
+}
+
+void ChannelSplitterHandler::SetChannelInterpretation(
+ const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+  // channelInterpretation must be "discrete".
+ if (mode != "discrete") {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "ChannelSplitter: channelInterpretation "
+ "cannot be changed from 'discrete'");
+ }
+}
+
+// ----------------------------------------------------------------
+
+ChannelSplitterNode::ChannelSplitterNode(BaseAudioContext& context,
+ unsigned number_of_outputs)
+ : AudioNode(context) {
+ SetHandler(ChannelSplitterHandler::Create(*this, context.sampleRate(),
+ number_of_outputs));
+}
+
+ChannelSplitterNode* ChannelSplitterNode::Create(
+ BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ // Default number of outputs for the splitter node is 6.
+ return Create(context, 6, exception_state);
+}
+
+ChannelSplitterNode* ChannelSplitterNode::Create(
+ BaseAudioContext& context,
+ unsigned number_of_outputs,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ if (!number_of_outputs ||
+ number_of_outputs > BaseAudioContext::MaxNumberOfChannels()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, ExceptionMessages::IndexOutsideRange<size_t>(
+ "number of outputs", number_of_outputs, 1,
+ ExceptionMessages::kInclusiveBound,
+ BaseAudioContext::MaxNumberOfChannels(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ return new ChannelSplitterNode(context, number_of_outputs);
+}
+
+ChannelSplitterNode* ChannelSplitterNode::Create(
+ BaseAudioContext* context,
+ const ChannelSplitterOptions& options,
+ ExceptionState& exception_state) {
+ ChannelSplitterNode* node =
+ Create(*context, options.numberOfOutputs(), exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ return node;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h
new file mode 100644
index 00000000000..31e2e9af892
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_SPLITTER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_SPLITTER_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ChannelSplitterOptions;
+
+class ChannelSplitterHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<ChannelSplitterHandler>
+ Create(AudioNode&, float sample_rate, unsigned number_of_outputs);
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+ void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCountMode(const String&, ExceptionState&) final;
+ void SetChannelInterpretation(const String&, ExceptionState&) final;
+
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ ChannelSplitterHandler(AudioNode&,
+ float sample_rate,
+ unsigned number_of_outputs);
+};
+
+class ChannelSplitterNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static ChannelSplitterNode* Create(BaseAudioContext&, ExceptionState&);
+ static ChannelSplitterNode* Create(BaseAudioContext&,
+ unsigned number_of_outputs,
+ ExceptionState&);
+ static ChannelSplitterNode* Create(BaseAudioContext*,
+ const ChannelSplitterOptions&,
+ ExceptionState&);
+
+ private:
+ ChannelSplitterNode(BaseAudioContext&, unsigned number_of_outputs);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CHANNEL_SPLITTER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.idl
new file mode 100644
index 00000000000..8f8d72134be
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_node.idl
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#channelsplitternode
+[
+ Constructor(BaseAudioContext context, optional ChannelSplitterOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface ChannelSplitterNode : AudioNode {
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_options.idl
new file mode 100644
index 00000000000..7bc68cf4a49
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/channel_splitter_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-channelsplitteroptions
+dictionary ChannelSplitterOptions : AudioNodeOptions {
+ unsigned long numberOfOutputs = 6;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc
new file mode 100644
index 00000000000..ea86ee7e890
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.cc
@@ -0,0 +1,160 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/constant_source_node.h"
+
+#include <algorithm>
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/constant_source_options.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+#include "third_party/blink/renderer/platform/wtf/std_lib_extras.h"
+
+namespace blink {
+
+ConstantSourceHandler::ConstantSourceHandler(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& offset)
+ : AudioScheduledSourceHandler(kNodeTypeConstantSource, node, sample_rate),
+ offset_(&offset),
+ sample_accurate_values_(AudioUtilities::kRenderQuantumFrames) {
+ // A ConstantSource is always mono.
+ AddOutput(1);
+
+ Initialize();
+}
+
+scoped_refptr<ConstantSourceHandler> ConstantSourceHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& offset) {
+ return base::AdoptRef(new ConstantSourceHandler(node, sample_rate, offset));
+}
+
+ConstantSourceHandler::~ConstantSourceHandler() {
+ Uninitialize();
+}
+
+void ConstantSourceHandler::Process(size_t frames_to_process) {
+ AudioBus* output_bus = Output(0).Bus();
+ DCHECK(output_bus);
+
+ if (!IsInitialized() || !output_bus->NumberOfChannels()) {
+ output_bus->Zero();
+ return;
+ }
+
+ // The audio thread can't block on this lock, so we call tryLock() instead.
+ MutexTryLocker try_locker(process_lock_);
+ if (!try_locker.Locked()) {
+ // Too bad - the tryLock() failed.
+ output_bus->Zero();
+ return;
+ }
+
+ size_t quantum_frame_offset;
+ size_t non_silent_frames_to_process;
+ double start_frame_offset;
+
+ // Figure out where in the current rendering quantum that the source is
+ // active and for how many frames.
+ UpdateSchedulingInfo(frames_to_process, output_bus, quantum_frame_offset,
+ non_silent_frames_to_process, start_frame_offset);
+
+ if (!non_silent_frames_to_process) {
+ output_bus->Zero();
+ return;
+ }
+
+ if (offset_->HasSampleAccurateValues()) {
+ DCHECK_LE(frames_to_process, sample_accurate_values_.size());
+ if (frames_to_process <= sample_accurate_values_.size()) {
+ float* offsets = sample_accurate_values_.Data();
+ offset_->CalculateSampleAccurateValues(offsets, frames_to_process);
+ if (non_silent_frames_to_process > 0) {
+ memcpy(output_bus->Channel(0)->MutableData() + quantum_frame_offset,
+ offsets + quantum_frame_offset,
+ non_silent_frames_to_process * sizeof(*offsets));
+ output_bus->ClearSilentFlag();
+ } else {
+ output_bus->Zero();
+ }
+ }
+ } else {
+ float value = offset_->Value();
+
+ if (value == 0) {
+ output_bus->Zero();
+ } else {
+ float* dest = output_bus->Channel(0)->MutableData();
+ dest += quantum_frame_offset;
+ for (unsigned k = 0; k < non_silent_frames_to_process; ++k) {
+ dest[k] = value;
+ }
+ output_bus->ClearSilentFlag();
+ }
+ }
+}
+
+bool ConstantSourceHandler::PropagatesSilence() const {
+ return !IsPlayingOrScheduled() || HasFinished();
+}
+
+// ----------------------------------------------------------------
+ConstantSourceNode::ConstantSourceNode(BaseAudioContext& context)
+ : AudioScheduledSourceNode(context),
+ offset_(AudioParam::Create(context,
+ kParamTypeConstantSourceOffset,
+ "ConstantSource.offset",
+ 1)) {
+ SetHandler(ConstantSourceHandler::Create(*this, context.sampleRate(),
+ offset_->Handler()));
+}
+
+ConstantSourceNode* ConstantSourceNode::Create(
+ BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new ConstantSourceNode(context);
+}
+
+ConstantSourceNode* ConstantSourceNode::Create(
+ BaseAudioContext* context,
+ const ConstantSourceOptions& options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ ConstantSourceNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->offset()->setValue(options.offset());
+
+ return node;
+}
+
+void ConstantSourceNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(offset_);
+ AudioScheduledSourceNode::Trace(visitor);
+}
+
+ConstantSourceHandler& ConstantSourceNode::GetConstantSourceHandler() const {
+ return static_cast<ConstantSourceHandler&>(Handler());
+}
+
+AudioParam* ConstantSourceNode::offset() {
+ return offset_;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h
new file mode 100644
index 00000000000..1c0b3258703
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.h
@@ -0,0 +1,65 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONSTANT_SOURCE_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONSTANT_SOURCE_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ConstantSourceOptions;
+class ExceptionState;
+
+// ConstantSourceNode is an audio generator for a constant source
+
+class ConstantSourceHandler final : public AudioScheduledSourceHandler {
+ public:
+ static scoped_refptr<ConstantSourceHandler> Create(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& offset);
+ ~ConstantSourceHandler() override;
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+
+ private:
+ ConstantSourceHandler(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& offset);
+
+ // If we are no longer playing, propogate silence ahead to downstream nodes.
+ bool PropagatesSilence() const override;
+
+ scoped_refptr<AudioParamHandler> offset_;
+ AudioFloatArray sample_accurate_values_;
+};
+
+class ConstantSourceNode final : public AudioScheduledSourceNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static ConstantSourceNode* Create(BaseAudioContext&, ExceptionState&);
+ static ConstantSourceNode* Create(BaseAudioContext*,
+ const ConstantSourceOptions&,
+ ExceptionState&);
+ virtual void Trace(blink::Visitor*);
+
+ AudioParam* offset();
+
+ private:
+ ConstantSourceNode(BaseAudioContext&);
+ ConstantSourceHandler& GetConstantSourceHandler() const;
+
+ Member<AudioParam> offset_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONSTANT_SOURCE_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.idl
new file mode 100644
index 00000000000..8342f2dc89e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_node.idl
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#ConstantSourceNode
+[
+ Constructor(BaseAudioContext context, optional ConstantSourceOptions options),
+ RaisesException=Constructor,
+ ActiveScriptWrappable,
+ Measure
+]
+interface ConstantSourceNode : AudioScheduledSourceNode {
+ readonly attribute AudioParam offset;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/constant_source_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_options.idl
new file mode 100644
index 00000000000..dee22568e17
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/constant_source_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-constantsourceoptions
+dictionary ConstantSourceOptions {
+ float offset = 1;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc
new file mode 100644
index 00000000000..e0e008a7577
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.cc
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <memory>
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/convolver_node.h"
+#include "third_party/blink/renderer/modules/webaudio/convolver_options.h"
+#include "third_party/blink/renderer/platform/audio/reverb.h"
+
+// Note about empirical tuning:
+// The maximum FFT size affects reverb performance and accuracy.
+// If the reverb is single-threaded and processes entirely in the real-time
+// audio thread, it's important not to make this too high. In this case 8192 is
+// a good value. But, the Reverb object is multi-threaded, so we want this as
+// high as possible without losing too much accuracy. Very large FFTs will have
+// worse phase errors. Given these constraints 32768 is a good compromise.
+const size_t MaxFFTSize = 32768;
+
+namespace blink {
+
+ConvolverHandler::ConvolverHandler(AudioNode& node, float sample_rate)
+ : AudioHandler(kNodeTypeConvolver, node, sample_rate), normalize_(true) {
+ AddInput();
+ AddOutput(1);
+
+ // Node-specific default mixing rules.
+ channel_count_ = 2;
+ SetInternalChannelCountMode(kClampedMax);
+ SetInternalChannelInterpretation(AudioBus::kSpeakers);
+
+ Initialize();
+}
+
+scoped_refptr<ConvolverHandler> ConvolverHandler::Create(AudioNode& node,
+ float sample_rate) {
+ return base::AdoptRef(new ConvolverHandler(node, sample_rate));
+}
+
+ConvolverHandler::~ConvolverHandler() {
+ Uninitialize();
+}
+
+void ConvolverHandler::Process(size_t frames_to_process) {
+ AudioBus* output_bus = Output(0).Bus();
+ DCHECK(output_bus);
+
+ // Synchronize with possible dynamic changes to the impulse response.
+ MutexTryLocker try_locker(process_lock_);
+ if (try_locker.Locked()) {
+ if (!IsInitialized() || !reverb_) {
+ output_bus->Zero();
+ } else {
+ // Process using the convolution engine.
+ // Note that we can handle the case where nothing is connected to the
+ // input, in which case we'll just feed silence into the convolver.
+ // FIXME: If we wanted to get fancy we could try to factor in the 'tail
+ // time' and stop processing once the tail dies down if
+ // we keep getting fed silence.
+ reverb_->Process(Input(0).Bus(), output_bus, frames_to_process);
+ }
+ } else {
+ // Too bad - the tryLock() failed. We must be in the middle of setting a
+ // new impulse response.
+ output_bus->Zero();
+ }
+}
+
+void ConvolverHandler::SetBuffer(AudioBuffer* buffer,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (!buffer)
+ return;
+
+ if (buffer->sampleRate() != Context()->sampleRate()) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ "The buffer sample rate of " + String::Number(buffer->sampleRate()) +
+ " does not match the context rate of " +
+ String::Number(Context()->sampleRate()) + " Hz.");
+ return;
+ }
+
+ unsigned number_of_channels = buffer->numberOfChannels();
+ size_t buffer_length = buffer->length();
+
+ // The current implementation supports only 1-, 2-, or 4-channel impulse
+ // responses, with the 4-channel response being interpreted as true-stereo
+ // (see Reverb class).
+ bool is_channel_count_good = number_of_channels == 1 ||
+ number_of_channels == 2 ||
+ number_of_channels == 4;
+
+ if (!is_channel_count_good) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, "The buffer must have 1, 2, or 4 channels, not " +
+ String::Number(number_of_channels));
+ return;
+ }
+
+ // Wrap the AudioBuffer by an AudioBus. It's an efficient pointer set and not
+ // a memcpy(). This memory is simply used in the Reverb constructor and no
+ // reference to it is kept for later use in that class.
+ scoped_refptr<AudioBus> buffer_bus =
+ AudioBus::Create(number_of_channels, buffer_length, false);
+ for (unsigned i = 0; i < number_of_channels; ++i)
+ buffer_bus->SetChannelMemory(i, buffer->getChannelData(i).View()->Data(),
+ buffer_length);
+
+ buffer_bus->SetSampleRate(buffer->sampleRate());
+
+ // Create the reverb with the given impulse response.
+ std::unique_ptr<Reverb> reverb = std::make_unique<Reverb>(
+ buffer_bus.get(), AudioUtilities::kRenderQuantumFrames, MaxFFTSize,
+ Context() && Context()->HasRealtimeConstraint(), normalize_);
+
+ {
+ // The context must be locked since changing the buffer can
+ // re-configure the number of channels that are output.
+ BaseAudioContext::GraphAutoLocker context_locker(Context());
+
+ // Synchronize with process().
+ MutexLocker locker(process_lock_);
+ reverb_ = std::move(reverb);
+ buffer_ = buffer;
+ if (buffer) {
+ // This will propagate the channel count to any nodes connected further
+ // downstream in the graph.
+ Output(0).SetNumberOfChannels(ComputeNumberOfOutputChannels(
+ Input(0).NumberOfChannels(), buffer_->numberOfChannels()));
+ }
+ }
+}
+
+AudioBuffer* ConvolverHandler::Buffer() {
+ DCHECK(IsMainThread());
+ return buffer_.Get();
+}
+
+bool ConvolverHandler::RequiresTailProcessing() const {
+ // Always return true even if the tail time and latency might both be zero.
+ return true;
+}
+
+double ConvolverHandler::TailTime() const {
+ MutexTryLocker try_locker(process_lock_);
+ if (try_locker.Locked())
+ return reverb_ ? reverb_->ImpulseResponseLength() /
+ static_cast<double>(Context()->sampleRate())
+ : 0;
+ // Since we don't want to block the Audio Device thread, we return a large
+ // value instead of trying to acquire the lock.
+ return std::numeric_limits<double>::infinity();
+}
+
+double ConvolverHandler::LatencyTime() const {
+ MutexTryLocker try_locker(process_lock_);
+ if (try_locker.Locked())
+ return reverb_ ? reverb_->LatencyFrames() /
+ static_cast<double>(Context()->sampleRate())
+ : 0;
+ // Since we don't want to block the Audio Device thread, we return a large
+ // value instead of trying to acquire the lock.
+ return std::numeric_limits<double>::infinity();
+}
+
+unsigned ConvolverHandler::ComputeNumberOfOutputChannels(
+ unsigned input_channels,
+ unsigned response_channels) const {
+ // The number of output channels for a Convolver must be one or two.
+ // And can only be one if there's a mono source and a mono response
+ // buffer.
+ return clampTo(std::max(input_channels, response_channels), 1, 2);
+}
+
+void ConvolverHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // channelCount must be 2.
+ if (channel_count != 2) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ "ConvolverNode: channelCount cannot be changed from 2");
+ }
+}
+
+void ConvolverHandler::SetChannelCountMode(const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // channcelCountMode must be 'clamped-max'.
+ if (mode != "clamped-max") {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ "ConvolverNode: channelCountMode cannot be changed from 'clamped-max'");
+ }
+}
+
+void ConvolverHandler::CheckNumberOfChannelsForInput(AudioNodeInput* input) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ DCHECK(input);
+ DCHECK_EQ(input, &this->Input(0));
+ if (input != &this->Input(0))
+ return;
+
+ if (buffer_) {
+ unsigned number_of_output_channels = ComputeNumberOfOutputChannels(
+ input->NumberOfChannels(), buffer_->numberOfChannels());
+
+ if (IsInitialized() &&
+ number_of_output_channels != Output(0).NumberOfChannels()) {
+ // We're already initialized but the channel count has changed.
+ Uninitialize();
+ }
+
+ if (!IsInitialized()) {
+ // This will propagate the channel count to any nodes connected further
+ // downstream in the graph.
+ Output(0).SetNumberOfChannels(number_of_output_channels);
+ Initialize();
+ }
+ }
+
+ // Update the input's internal bus if needed.
+ AudioHandler::CheckNumberOfChannelsForInput(input);
+}
+// ----------------------------------------------------------------
+
+ConvolverNode::ConvolverNode(BaseAudioContext& context) : AudioNode(context) {
+ SetHandler(ConvolverHandler::Create(*this, context.sampleRate()));
+}
+
+ConvolverNode* ConvolverNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new ConvolverNode(context);
+}
+
+ConvolverNode* ConvolverNode::Create(BaseAudioContext* context,
+ const ConvolverOptions& options,
+ ExceptionState& exception_state) {
+ ConvolverNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ // It is important to set normalize first because setting the buffer will
+ // examing the normalize attribute to see if normalization needs to be done.
+ node->setNormalize(!options.disableNormalization());
+ if (options.hasBuffer())
+ node->setBuffer(options.buffer(), exception_state);
+ return node;
+}
+
+ConvolverHandler& ConvolverNode::GetConvolverHandler() const {
+ return static_cast<ConvolverHandler&>(Handler());
+}
+
+AudioBuffer* ConvolverNode::buffer() const {
+ return GetConvolverHandler().Buffer();
+}
+
+void ConvolverNode::setBuffer(AudioBuffer* new_buffer,
+ ExceptionState& exception_state) {
+ GetConvolverHandler().SetBuffer(new_buffer, exception_state);
+}
+
+bool ConvolverNode::normalize() const {
+ return GetConvolverHandler().Normalize();
+}
+
+void ConvolverNode::setNormalize(bool normalize) {
+ GetConvolverHandler().SetNormalize(normalize);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h
new file mode 100644
index 00000000000..96ee2e419d1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONVOLVER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONVOLVER_NODE_H_
+
+#include <memory>
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+class AudioBuffer;
+class ConvolverOptions;
+class ExceptionState;
+class Reverb;
+
+class MODULES_EXPORT ConvolverHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<ConvolverHandler> Create(AudioNode&, float sample_rate);
+ ~ConvolverHandler() override;
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+ // Called in the main thread when the number of channels for the input may
+ // have changed.
+ void CheckNumberOfChannelsForInput(AudioNodeInput*) override;
+
+ // Impulse responses
+ void SetBuffer(AudioBuffer*, ExceptionState&);
+ AudioBuffer* Buffer();
+
+ bool Normalize() const { return normalize_; }
+ void SetNormalize(bool normalize) { normalize_ = normalize; }
+ void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCountMode(const String&, ExceptionState&) final;
+
+ private:
+ ConvolverHandler(AudioNode&, float sample_rate);
+ double TailTime() const override;
+ double LatencyTime() const override;
+ bool RequiresTailProcessing() const final;
+
+ // Determine how many output channels to use from the number of
+ // input channels and the number of channels in the impulse response
+ // buffer.
+ unsigned ComputeNumberOfOutputChannels(unsigned input_channels,
+ unsigned response_channels) const;
+
+ std::unique_ptr<Reverb> reverb_;
+ // This Persistent doesn't make a reference cycle including the owner
+ // ConvolverNode.
+ // It is cross-thread, as it will be accessed by the audio and main threads.
+ CrossThreadPersistent<AudioBuffer> buffer_;
+
+ // This synchronizes dynamic changes to the convolution impulse response with
+ // process().
+ mutable Mutex process_lock_;
+
+ // Normalize the impulse response or not. Must default to true.
+ bool normalize_;
+
+ FRIEND_TEST_ALL_PREFIXES(ConvolverNodeTest, ReverbLifetime);
+};
+
+class MODULES_EXPORT ConvolverNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static ConvolverNode* Create(BaseAudioContext&, ExceptionState&);
+ static ConvolverNode* Create(BaseAudioContext*,
+ const ConvolverOptions&,
+ ExceptionState&);
+
+ AudioBuffer* buffer() const;
+ void setBuffer(AudioBuffer*, ExceptionState&);
+ bool normalize() const;
+ void setNormalize(bool);
+
+ private:
+ ConvolverNode(BaseAudioContext&);
+ ConvolverHandler& GetConvolverHandler() const;
+
+ FRIEND_TEST_ALL_PREFIXES(ConvolverNodeTest, ReverbLifetime);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CONVOLVER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.idl
new file mode 100644
index 00000000000..53559c561a8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node.idl
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// A linear convolution effect
+// See https://webaudio.github.io/web-audio-api/#ConvolverNode
+[
+ Constructor(BaseAudioContext context, optional ConvolverOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface ConvolverNode : AudioNode {
+ [RaisesException=Setter] attribute AudioBuffer? buffer;
+ attribute boolean normalize;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_node_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node_test.cc
new file mode 100644
index 00000000000..548264494ef
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_node_test.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/convolver_node.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+
+namespace blink {
+
+TEST(ConvolverNodeTest, ReverbLifetime) {
+ std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
+ OfflineAudioContext* context = OfflineAudioContext::Create(
+ &page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
+ ConvolverNode* node = context->createConvolver(ASSERT_NO_EXCEPTION);
+ ConvolverHandler& handler = node->GetConvolverHandler();
+ EXPECT_FALSE(handler.reverb_);
+ node->setBuffer(AudioBuffer::Create(2, 1, 48000), ASSERT_NO_EXCEPTION);
+ EXPECT_TRUE(handler.reverb_);
+ BaseAudioContext::GraphAutoLocker locker(context);
+ handler.Dispose();
+ // m_reverb should live after dispose() because an audio thread is using it.
+ EXPECT_TRUE(handler.reverb_);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/convolver_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/convolver_options.idl
new file mode 100644
index 00000000000..637e6e4152b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/convolver_options.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-convolveroptions
+dictionary ConvolverOptions : AudioNodeOptions {
+ AudioBuffer? buffer;
+ boolean disableNormalization = false;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h b/chromium/third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h
new file mode 100644
index 00000000000..01f6c0d9eee
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/cross_thread_audio_worklet_processor_info.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CROSS_THREAD_AUDIO_WORKLET_PROCESSOR_INFO_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CROSS_THREAD_AUDIO_WORKLET_PROCESSOR_INFO_H_
+
+#include "third_party/blink/renderer/modules/webaudio/audio_param_descriptor.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_processor_definition.h"
+
+namespace blink {
+
+// A class for shallow repackage of |AudioParamDescriptor|. This is created only
+// when requested when the synchronization between AudioWorkletMessagingProxy
+// and AudioWorkletGlobalScope.
+class CrossThreadAudioParamInfo {
+ DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
+
+ public:
+ explicit CrossThreadAudioParamInfo(const AudioParamDescriptor* descriptor)
+ : name_(descriptor->name().IsolatedCopy()),
+ default_value_(descriptor->defaultValue()),
+ max_value_(descriptor->maxValue()),
+ min_value_(descriptor->minValue()) {}
+
+ const String& Name() const { return name_; }
+ float DefaultValue() const { return default_value_; }
+ float MaxValue() const { return max_value_; }
+ float MinValue() const { return min_value_; }
+
+ private:
+ const String name_;
+ const float default_value_;
+ const float max_value_;
+ const float min_value_;
+};
+
+// A class for shallow repackage of |AudioWorkletProcessorDefinition|. This is
+// created only when requested when the synchronization between
+// AudioWorkletMessagingProxy and AudioWorkletGlobalScope.
+class CrossThreadAudioWorkletProcessorInfo {
+ DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
+
+ public:
+ explicit CrossThreadAudioWorkletProcessorInfo(
+ const AudioWorkletProcessorDefinition& definition)
+ : name_(definition.GetName().IsolatedCopy()) {
+ // To avoid unnecessary reallocations of the vector.
+ param_info_list_.ReserveInitialCapacity(
+ definition.GetAudioParamDescriptorNames().size());
+
+ for (const String& name : definition.GetAudioParamDescriptorNames()) {
+ param_info_list_.emplace_back(
+ definition.GetAudioParamDescriptor(name));
+ }
+ }
+
+ const String& Name() const { return name_; }
+ Vector<CrossThreadAudioParamInfo> ParamInfoList() { return param_info_list_; }
+
+ private:
+ const String name_;
+ Vector<CrossThreadAudioParamInfo> param_info_list_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_CROSS_THREAD_AUDIO_WORKLET_PROCESSOR_INFO_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc
new file mode 100644
index 00000000000..50b4033940c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h"
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+
+namespace blink {
+
+DefaultAudioDestinationHandler::DefaultAudioDestinationHandler(
+ AudioNode& node,
+ const WebAudioLatencyHint& latency_hint)
+ : AudioDestinationHandler(node),
+ number_of_input_channels_(0),
+ latency_hint_(latency_hint) {
+ // Node-specific default mixing rules.
+ channel_count_ = 2;
+ SetInternalChannelCountMode(kExplicit);
+ SetInternalChannelInterpretation(AudioBus::kSpeakers);
+}
+
+scoped_refptr<DefaultAudioDestinationHandler>
+DefaultAudioDestinationHandler::Create(
+ AudioNode& node,
+ const WebAudioLatencyHint& latency_hint) {
+ return base::AdoptRef(new DefaultAudioDestinationHandler(node, latency_hint));
+}
+
+DefaultAudioDestinationHandler::~DefaultAudioDestinationHandler() {
+ DCHECK(!IsInitialized());
+}
+
+void DefaultAudioDestinationHandler::Dispose() {
+ Uninitialize();
+ AudioDestinationHandler::Dispose();
+}
+
+void DefaultAudioDestinationHandler::Initialize() {
+ DCHECK(IsMainThread());
+ if (IsInitialized())
+ return;
+
+ CreateDestination();
+ AudioHandler::Initialize();
+}
+
+void DefaultAudioDestinationHandler::Uninitialize() {
+ DCHECK(IsMainThread());
+ if (!IsInitialized())
+ return;
+
+ if (destination_->IsPlaying())
+ StopDestination();
+
+ number_of_input_channels_ = 0;
+ AudioHandler::Uninitialize();
+}
+
+void DefaultAudioDestinationHandler::CreateDestination() {
+ destination_ = AudioDestination::Create(*this,
+ ChannelCount(), latency_hint_, Context()->GetSecurityOrigin());
+}
+
+void DefaultAudioDestinationHandler::StartDestination() {
+ DCHECK(!destination_->IsPlaying());
+
+ AudioWorklet* audio_worklet = Context()->audioWorklet();
+ if (audio_worklet && audio_worklet->IsReady()) {
+ // This task runner is only used to fire the audio render callback, so it
+ // MUST not be throttled to avoid potential audio glitch.
+ destination_->StartWithWorkletTaskRunner(
+ audio_worklet->GetMessagingProxy()->GetBackingWorkerThread()
+ ->GetTaskRunner(TaskType::kUnthrottled));
+ } else {
+ destination_->Start();
+ }
+}
+
+void DefaultAudioDestinationHandler::StopDestination() {
+ DCHECK(destination_->IsPlaying());
+ destination_->Stop();
+}
+
+void DefaultAudioDestinationHandler::StartRendering() {
+ DCHECK(IsInitialized());
+ // Context might try to start rendering again while the destination is
+ // running. Ignore it when that happens.
+ if (IsInitialized() && !destination_->IsPlaying()) {
+ StartDestination();
+ }
+}
+
+void DefaultAudioDestinationHandler::StopRendering() {
+ DCHECK(IsInitialized());
+ // Context might try to stop rendering again while the destination is stopped.
+ // Ignore it when that happens.
+ if (IsInitialized() && destination_->IsPlaying()) {
+ StopDestination();
+ }
+}
+
+void DefaultAudioDestinationHandler::RestartRendering() {
+ StopRendering();
+ StartRendering();
+}
+
+unsigned long DefaultAudioDestinationHandler::MaxChannelCount() const {
+ return AudioDestination::MaxChannelCount();
+}
+
+size_t DefaultAudioDestinationHandler::CallbackBufferSize() const {
+ return destination_->CallbackBufferSize();
+}
+
+void DefaultAudioDestinationHandler::SetChannelCount(
+ unsigned long channel_count,
+ ExceptionState& exception_state) {
+ // The channelCount for the input to this node controls the actual number of
+ // channels we send to the audio hardware. It can only be set depending on the
+ // maximum number of channels supported by the hardware.
+
+ DCHECK(IsMainThread());
+
+ if (!MaxChannelCount() || channel_count > MaxChannelCount()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError,
+ ExceptionMessages::IndexOutsideRange<unsigned>(
+ "channel count", channel_count, 1,
+ ExceptionMessages::kInclusiveBound, MaxChannelCount(),
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ unsigned long old_channel_count = this->ChannelCount();
+ AudioHandler::SetChannelCount(channel_count, exception_state);
+
+ if (!exception_state.HadException() &&
+ this->ChannelCount() != old_channel_count && IsInitialized()) {
+ // Recreate/restart destination.
+ StopDestination();
+ CreateDestination();
+ StartDestination();
+ }
+}
+
+double DefaultAudioDestinationHandler::SampleRate() const {
+ return destination_ ? destination_->SampleRate() : 0;
+}
+
+int DefaultAudioDestinationHandler::FramesPerBuffer() const {
+ return destination_ ? destination_->FramesPerBuffer() : 0;
+}
+
+// ----------------------------------------------------------------
+
+DefaultAudioDestinationNode::DefaultAudioDestinationNode(
+ BaseAudioContext& context,
+ const WebAudioLatencyHint& latency_hint)
+ : AudioDestinationNode(context) {
+ SetHandler(DefaultAudioDestinationHandler::Create(*this, latency_hint));
+}
+
+DefaultAudioDestinationNode* DefaultAudioDestinationNode::Create(
+ BaseAudioContext* context,
+ const WebAudioLatencyHint& latency_hint) {
+ return new DefaultAudioDestinationNode(*context, latency_hint);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h
new file mode 100644
index 00000000000..2abfee80394
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/default_audio_destination_node.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFAULT_AUDIO_DESTINATION_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFAULT_AUDIO_DESTINATION_NODE_H_
+
+#include <memory>
+#include "third_party/blink/public/platform/web_audio_latency_hint.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_destination_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_destination.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+class WebAudioLatencyHint;
+
+class DefaultAudioDestinationHandler final : public AudioDestinationHandler {
+ public:
+ static scoped_refptr<DefaultAudioDestinationHandler> Create(
+ AudioNode&,
+ const WebAudioLatencyHint&);
+ ~DefaultAudioDestinationHandler() override;
+
+ // AudioHandler
+ void Dispose() override;
+ void Initialize() override;
+ void Uninitialize() override;
+ void SetChannelCount(unsigned long, ExceptionState&) override;
+
+ // AudioDestinationHandler
+ void StartRendering() override;
+ void StopRendering() override;
+ void RestartRendering() override;
+ unsigned long MaxChannelCount() const override;
+ // Returns the rendering callback buffer size.
+ size_t CallbackBufferSize() const override;
+ double SampleRate() const override;
+ int FramesPerBuffer() const override;
+
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ explicit DefaultAudioDestinationHandler(AudioNode&,
+ const WebAudioLatencyHint&);
+ void CreateDestination();
+
+ // Starts platform/AudioDestination. If the runtime flag for AudioWorklet is
+ // set, uses the AudioWorkletThread's backing thread for the rendering.
+ void StartDestination();
+
+ void StopDestination();
+
+  // Uses |scoped_refptr| to keep the AudioDestination alive until all the cross-thread
+ // tasks are completed.
+ scoped_refptr<AudioDestination> destination_;
+
+ String input_device_id_;
+ unsigned number_of_input_channels_;
+ const WebAudioLatencyHint latency_hint_;
+};
+
+class DefaultAudioDestinationNode final : public AudioDestinationNode {
+ public:
+ static DefaultAudioDestinationNode* Create(BaseAudioContext*,
+ const WebAudioLatencyHint&);
+
+ size_t CallbackBufferSize() const { return Handler().CallbackBufferSize(); };
+
+ private:
+ explicit DefaultAudioDestinationNode(BaseAudioContext&,
+ const WebAudioLatencyHint&);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFAULT_AUDIO_DESTINATION_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc
new file mode 100644
index 00000000000..7978ed91a82
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.cc
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/deferred_task_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/web_task_runner.h"
+
+namespace blink {
+
+void DeferredTaskHandler::lock() {
+ // Don't allow regular lock in real-time audio thread.
+ DCHECK(!IsAudioThread());
+ context_graph_mutex_.lock();
+}
+
+bool DeferredTaskHandler::TryLock() {
+ // Try to catch cases of using try lock on main thread
+ // - it should use regular lock.
+ DCHECK(IsAudioThread());
+ if (!IsAudioThread()) {
+ // In release build treat tryLock() as lock() (since above
+ // DCHECK(isAudioThread) never fires) - this is the best we can do.
+ lock();
+ return true;
+ }
+ return context_graph_mutex_.TryLock();
+}
+
+void DeferredTaskHandler::unlock() {
+ context_graph_mutex_.unlock();
+}
+
+void DeferredTaskHandler::OfflineLock() {
+ // CHECK is here to make sure to explicitly crash if this is called from
+ // other than the offline render thread, which is considered as the audio
+ // thread in OfflineAudioContext.
+ CHECK(IsAudioThread()) << "DeferredTaskHandler::offlineLock() must be called "
+ "within the offline audio thread.";
+
+ context_graph_mutex_.lock();
+}
+
+bool DeferredTaskHandler::IsGraphOwner() {
+#if DCHECK_IS_ON()
+ return context_graph_mutex_.Locked();
+#else
+ // The method is only used inside of DCHECK() so it must be no-op in the
+ // release build. Returning false so we can catch when it happens.
+ return false;
+#endif
+}
+
+void DeferredTaskHandler::AddDeferredBreakConnection(AudioHandler& node) {
+ DCHECK(IsAudioThread());
+ deferred_break_connection_list_.push_back(&node);
+}
+
+void DeferredTaskHandler::BreakConnections() {
+ DCHECK(IsAudioThread());
+ DCHECK(IsGraphOwner());
+
+ for (unsigned i = 0; i < deferred_break_connection_list_.size(); ++i)
+ deferred_break_connection_list_[i]->BreakConnectionWithLock();
+ deferred_break_connection_list_.clear();
+}
+
+void DeferredTaskHandler::MarkSummingJunctionDirty(
+ AudioSummingJunction* summing_junction) {
+ DCHECK(IsGraphOwner());
+ dirty_summing_junctions_.insert(summing_junction);
+}
+
+void DeferredTaskHandler::RemoveMarkedSummingJunction(
+ AudioSummingJunction* summing_junction) {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(*this);
+ dirty_summing_junctions_.erase(summing_junction);
+}
+
+void DeferredTaskHandler::MarkAudioNodeOutputDirty(AudioNodeOutput* output) {
+ DCHECK(IsGraphOwner());
+ DCHECK(IsMainThread());
+ dirty_audio_node_outputs_.insert(output);
+}
+
+void DeferredTaskHandler::RemoveMarkedAudioNodeOutput(AudioNodeOutput* output) {
+ DCHECK(IsGraphOwner());
+ DCHECK(IsMainThread());
+ dirty_audio_node_outputs_.erase(output);
+}
+
+void DeferredTaskHandler::HandleDirtyAudioSummingJunctions() {
+ DCHECK(IsGraphOwner());
+
+ for (AudioSummingJunction* junction : dirty_summing_junctions_)
+ junction->UpdateRenderingState();
+ dirty_summing_junctions_.clear();
+}
+
+void DeferredTaskHandler::HandleDirtyAudioNodeOutputs() {
+ DCHECK(IsGraphOwner());
+
+ HashSet<AudioNodeOutput*> dirty_outputs;
+ dirty_audio_node_outputs_.swap(dirty_outputs);
+
+ // Note: the updating of rendering state may cause output nodes
+ // further down the chain to be marked as dirty. These will not
+ // be processed in this render quantum.
+ for (AudioNodeOutput* output : dirty_outputs)
+ output->UpdateRenderingState();
+}
+
+void DeferredTaskHandler::AddAutomaticPullNode(AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+
+ if (!automatic_pull_nodes_.Contains(node)) {
+ automatic_pull_nodes_.insert(node);
+ automatic_pull_nodes_need_updating_ = true;
+ }
+}
+
+void DeferredTaskHandler::RemoveAutomaticPullNode(AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+
+ if (automatic_pull_nodes_.Contains(node)) {
+ automatic_pull_nodes_.erase(node);
+ automatic_pull_nodes_need_updating_ = true;
+ }
+}
+
+void DeferredTaskHandler::UpdateAutomaticPullNodes() {
+ DCHECK(IsGraphOwner());
+
+ if (automatic_pull_nodes_need_updating_) {
+ CopyToVector(automatic_pull_nodes_, rendering_automatic_pull_nodes_);
+ automatic_pull_nodes_need_updating_ = false;
+ }
+}
+
+void DeferredTaskHandler::ProcessAutomaticPullNodes(size_t frames_to_process) {
+ DCHECK(IsAudioThread());
+
+ for (unsigned i = 0; i < rendering_automatic_pull_nodes_.size(); ++i)
+ rendering_automatic_pull_nodes_[i]->ProcessIfNecessary(frames_to_process);
+}
+
+void DeferredTaskHandler::AddTailProcessingHandler(
+ scoped_refptr<AudioHandler> handler) {
+ DCHECK(IsGraphOwner());
+
+ if (!tail_processing_handlers_.Contains(handler)) {
+#if DEBUG_AUDIONODE_REFERENCES > 1
+ handler->AddTailProcessingDebug();
+#endif
+ tail_processing_handlers_.push_back(handler);
+ }
+}
+
+void DeferredTaskHandler::RemoveTailProcessingHandler(
+ scoped_refptr<AudioHandler> handler,
+ bool disable_outputs) {
+ DCHECK(IsGraphOwner());
+
+ size_t index = tail_processing_handlers_.Find(handler);
+ if (index != kNotFound) {
+#if DEBUG_AUDIONODE_REFERENCES > 1
+ handler->RemoveTailProcessingDebug();
+#endif
+
+ if (disable_outputs) {
+ // Disabling of outputs should happen on the main thread so save this
+ // handler so it can be processed there.
+ finished_tail_processing_handlers_.push_back(handler);
+ }
+ tail_processing_handlers_.EraseAt(index);
+ }
+}
+
+void DeferredTaskHandler::UpdateTailProcessingHandlers() {
+ DCHECK(IsAudioThread());
+
+ for (unsigned k = tail_processing_handlers_.size(); k > 0; --k) {
+ scoped_refptr<AudioHandler> handler = tail_processing_handlers_[k - 1];
+ if (handler->PropagatesSilence()) {
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr,
+ "[%16p]: %16p: %2d: updateTail @%.15g (tail = %.15g + %.15g)\n",
+ handler->Context(), handler.get(), handler->GetNodeType(),
+ handler->Context()->currentTime(), handler->TailTime(),
+ handler->LatencyTime());
+#endif
+ RemoveTailProcessingHandler(handler, true);
+ }
+ }
+}
+
+void DeferredTaskHandler::AddChangedChannelCountMode(AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+ DCHECK(IsMainThread());
+ deferred_count_mode_change_.insert(node);
+}
+
+void DeferredTaskHandler::RemoveChangedChannelCountMode(AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+
+ deferred_count_mode_change_.erase(node);
+}
+
+void DeferredTaskHandler::AddChangedChannelInterpretation(AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+ DCHECK(IsMainThread());
+ deferred_channel_interpretation_change_.insert(node);
+}
+
+void DeferredTaskHandler::RemoveChangedChannelInterpretation(
+ AudioHandler* node) {
+ DCHECK(IsGraphOwner());
+
+ deferred_channel_interpretation_change_.erase(node);
+}
+
+void DeferredTaskHandler::UpdateChangedChannelCountMode() {
+ DCHECK(IsGraphOwner());
+
+ for (AudioHandler* node : deferred_count_mode_change_)
+ node->UpdateChannelCountMode();
+ deferred_count_mode_change_.clear();
+}
+
+void DeferredTaskHandler::UpdateChangedChannelInterpretation() {
+ DCHECK(IsGraphOwner());
+
+ for (AudioHandler* node : deferred_channel_interpretation_change_)
+ node->UpdateChannelInterpretation();
+ deferred_channel_interpretation_change_.clear();
+}
+
+DeferredTaskHandler::DeferredTaskHandler()
+ : automatic_pull_nodes_need_updating_(false), audio_thread_(0) {}
+
+scoped_refptr<DeferredTaskHandler> DeferredTaskHandler::Create() {
+ return base::AdoptRef(new DeferredTaskHandler());
+}
+
+DeferredTaskHandler::~DeferredTaskHandler() {
+ DCHECK(!automatic_pull_nodes_.size());
+ if (automatic_pull_nodes_need_updating_)
+ rendering_automatic_pull_nodes_.resize(automatic_pull_nodes_.size());
+ DCHECK(!rendering_automatic_pull_nodes_.size());
+}
+
+void DeferredTaskHandler::HandleDeferredTasks() {
+ UpdateChangedChannelCountMode();
+ UpdateChangedChannelInterpretation();
+ HandleDirtyAudioSummingJunctions();
+ HandleDirtyAudioNodeOutputs();
+ UpdateAutomaticPullNodes();
+ UpdateTailProcessingHandlers();
+}
+
+void DeferredTaskHandler::ContextWillBeDestroyed() {
+ for (auto& handler : rendering_orphan_handlers_)
+ handler->ClearContext();
+ for (auto& handler : deletable_orphan_handlers_)
+ handler->ClearContext();
+ ClearHandlersToBeDeleted();
+ // Some handlers might live because of their cross thread tasks.
+}
+
+DeferredTaskHandler::GraphAutoLocker::GraphAutoLocker(BaseAudioContext* context)
+ : handler_(context->GetDeferredTaskHandler()) {
+ handler_.lock();
+}
+
+DeferredTaskHandler::OfflineGraphAutoLocker::OfflineGraphAutoLocker(
+ OfflineAudioContext* context)
+ : handler_(context->GetDeferredTaskHandler()) {
+ handler_.OfflineLock();
+}
+
+void DeferredTaskHandler::AddRenderingOrphanHandler(
+ scoped_refptr<AudioHandler> handler) {
+ DCHECK(handler);
+ DCHECK(!rendering_orphan_handlers_.Contains(handler));
+ rendering_orphan_handlers_.push_back(std::move(handler));
+}
+
+void DeferredTaskHandler::RequestToDeleteHandlersOnMainThread() {
+ DCHECK(IsGraphOwner());
+ DCHECK(IsAudioThread());
+ if (rendering_orphan_handlers_.IsEmpty())
+ return;
+ deletable_orphan_handlers_.AppendVector(rendering_orphan_handlers_);
+ rendering_orphan_handlers_.clear();
+ PostCrossThreadTask(
+ *Platform::Current()->MainThread()->GetTaskRunner(), FROM_HERE,
+ CrossThreadBind(&DeferredTaskHandler::DeleteHandlersOnMainThread,
+ scoped_refptr<DeferredTaskHandler>(this)));
+}
+
+void DeferredTaskHandler::DeleteHandlersOnMainThread() {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(*this);
+ deletable_orphan_handlers_.clear();
+ DisableOutputsForTailProcessing();
+}
+
+void DeferredTaskHandler::ClearHandlersToBeDeleted() {
+ DCHECK(IsMainThread());
+ GraphAutoLocker locker(*this);
+ tail_processing_handlers_.clear();
+ rendering_orphan_handlers_.clear();
+ deletable_orphan_handlers_.clear();
+}
+
+void DeferredTaskHandler::SetAudioThreadToCurrentThread() {
+ DCHECK(!IsMainThread());
+ ThreadIdentifier thread = CurrentThread();
+ ReleaseStore(&audio_thread_, thread);
+}
+
+void DeferredTaskHandler::DisableOutputsForTailProcessing() {
+ DCHECK(IsMainThread());
+ // Tail processing nodes have finished processing their tails so we need to
+ // disable their outputs to indicate to downstream nodes that they're done.
+ // This has to be done in the main thread because DisableOutputs() can cause
+  // summing junctions to go away, which must be done on the main thread.
+ for (auto& handler : finished_tail_processing_handlers_) {
+ handler->DisableOutputs();
+ }
+ finished_tail_processing_handlers_.clear();
+}
+
+void DeferredTaskHandler::FinishTailProcessing() {
+ DCHECK(IsMainThread());
+ // DisableOutputs must run with the graph lock.
+ GraphAutoLocker locker(*this);
+
+ // TODO(crbug.com/832200): Simplify this!
+
+ // |DisableOutputs()| can cause new handlers to start tail processing, which
+  // in turn can cause the handler to want to disable outputs. For the former
+ // case, the handler is added to |tail_processing_handlers_|. In the latter
+ // case, the handler is added to |finished_tail_processing_handlers_|. So, we
+ // need to loop around until these vectors are completely empty.
+ do {
+ while (tail_processing_handlers_.size() > 0) {
+ // |DisableOutputs()| can modify |tail_processing_handlers_|, so
+ // swap it out before processing it. And keep running this until
+ // nothing gets added to |tail_processing_handlers_|.
+ Vector<scoped_refptr<AudioHandler>> handlers_to_be_disabled;
+
+ handlers_to_be_disabled.swap(tail_processing_handlers_);
+ for (auto& handler : handlers_to_be_disabled)
+ handler->DisableOutputs();
+ }
+ DisableOutputsForTailProcessing();
+ } while (tail_processing_handlers_.size() > 0 ||
+ finished_tail_processing_handlers_.size() > 0);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h
new file mode 100644
index 00000000000..2db9eb1a637
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/deferred_task_handler.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFERRED_TASK_HANDLER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFERRED_TASK_HANDLER_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/thread_safe_ref_counted.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class OfflineAudioContext;
+class AudioHandler;
+class AudioNodeOutput;
+class AudioSummingJunction;
+
+// DeferredTaskHandler manages the major part of pre- and post- rendering tasks,
+// and provides a lock mechanism against the audio rendering graph. A
+// DeferredTaskHandler object is created when a BaseAudioContext object is
+// created.
+//
+// DeferredTaskHandler outlives the BaseAudioContext only if all of the
+// following conditions match:
+// - An audio rendering thread is running,
+// - It is requested to stop,
+// - The audio rendering thread calls requestToDeleteHandlersOnMainThread(),
+// - It posts a task of deleteHandlersOnMainThread(), and
+// - GC happens and it collects the BaseAudioContext before the task execution.
+//
+class MODULES_EXPORT DeferredTaskHandler final
+ : public ThreadSafeRefCounted<DeferredTaskHandler> {
+ public:
+ static scoped_refptr<DeferredTaskHandler> Create();
+ ~DeferredTaskHandler();
+
+ void HandleDeferredTasks();
+ void ContextWillBeDestroyed();
+
+ // BaseAudioContext can pull node(s) at the end of each render quantum even
+ // when they are not connected to any downstream nodes. These two methods are
+ // called by the nodes who want to add/remove themselves into/from the
+ // automatic pull lists.
+ void AddAutomaticPullNode(AudioHandler*);
+ void RemoveAutomaticPullNode(AudioHandler*);
+ // Called right before handlePostRenderTasks() to handle nodes which need to
+ // be pulled even when they are not connected to anything.
+ void ProcessAutomaticPullNodes(size_t frames_to_process);
+
+ // Keep track of AudioNode's that have their channel count mode changed. We
+ // process the changes in the post rendering phase.
+ void AddChangedChannelCountMode(AudioHandler*);
+ void RemoveChangedChannelCountMode(AudioHandler*);
+
+ // Keep track of AudioNode's that have their channel interpretation
+ // changed. We process the changes in the post rendering phase.
+ void AddChangedChannelInterpretation(AudioHandler*);
+ void RemoveChangedChannelInterpretation(AudioHandler*);
+
+ // Only accessed when the graph lock is held.
+ void MarkSummingJunctionDirty(AudioSummingJunction*);
+ // Only accessed when the graph lock is held. Must be called on the main
+ // thread.
+ void RemoveMarkedSummingJunction(AudioSummingJunction*);
+
+ void MarkAudioNodeOutputDirty(AudioNodeOutput*);
+ void RemoveMarkedAudioNodeOutput(AudioNodeOutput*);
+
+ // In AudioNode::breakConnection() and deref(), a tryLock() is used for
+ // calling actual processing, but if it fails keep track here.
+ void AddDeferredBreakConnection(AudioHandler&);
+ void BreakConnections();
+
+ void AddRenderingOrphanHandler(scoped_refptr<AudioHandler>);
+ void RequestToDeleteHandlersOnMainThread();
+ void ClearHandlersToBeDeleted();
+
+ // If |node| requires tail processing, add it to the list of tail
+ // nodes so the tail is processed.
+ void AddTailProcessingHandler(scoped_refptr<AudioHandler>);
+
+ // Remove |node| from the list of tail nodes (because the tail processing is
+ // complete). Set |disable_outputs| to true if the outputs of the handler
+ // should also be disabled. This should be true if the tail is done. But if
+ // we're reconnected or re-enabled, then |disable_outputs| should be false.
+ void RemoveTailProcessingHandler(scoped_refptr<AudioHandler> node,
+ bool disable_outputs);
+
+ // Remove all tail processing nodes. Should be called only when the
+ // context is done.
+ void FinishTailProcessing();
+
+ // For handlers that have finished processing their tail and require disabling
+  // the outputs of the handler, we do that here.
+ void DisableOutputsForTailProcessing();
+
+ //
+ // Thread Safety and Graph Locking:
+ //
+ void SetAudioThreadToCurrentThread();
+ ThreadIdentifier AudioThread() const { return AcquireLoad(&audio_thread_); }
+
+ // TODO(hongchan): Use no-barrier load here. (crbug.com/247328)
+ //
+ // It is okay to use a relaxed (no-barrier) load here. Because the data
+ // referenced by m_audioThread is not actually being used, thus we do not
+ // need a barrier between the load of m_audioThread and of that data.
+ bool IsAudioThread() const {
+ return CurrentThread() == AcquireLoad(&audio_thread_);
+ }
+
+ void lock();
+ bool TryLock();
+ void unlock();
+
+ // This locks the audio render thread for OfflineAudioContext rendering.
+ // MUST NOT be used in the real-time audio context.
+ void OfflineLock();
+
+ // Returns true if this thread owns the context's lock.
+ bool IsGraphOwner();
+
+ class MODULES_EXPORT GraphAutoLocker {
+ STACK_ALLOCATED();
+
+ public:
+ explicit GraphAutoLocker(DeferredTaskHandler& handler) : handler_(handler) {
+ handler_.lock();
+ }
+ explicit GraphAutoLocker(BaseAudioContext*);
+
+ ~GraphAutoLocker() { handler_.unlock(); }
+
+ private:
+ DeferredTaskHandler& handler_;
+ };
+
+ // This is for locking offline render thread (which is considered as the
+ // audio thread) with unlocking on self-destruction at the end of the scope.
+ // Also note that it uses lock() rather than tryLock() because the timing
+ // MUST be accurate on offline rendering.
+ class MODULES_EXPORT OfflineGraphAutoLocker {
+ STACK_ALLOCATED();
+
+ public:
+ explicit OfflineGraphAutoLocker(OfflineAudioContext*);
+
+ ~OfflineGraphAutoLocker() { handler_.unlock(); }
+
+ private:
+ DeferredTaskHandler& handler_;
+ };
+
+ private:
+ DeferredTaskHandler();
+ void UpdateAutomaticPullNodes();
+ void UpdateChangedChannelCountMode();
+ void UpdateChangedChannelInterpretation();
+ void HandleDirtyAudioSummingJunctions();
+ void HandleDirtyAudioNodeOutputs();
+ void DeleteHandlersOnMainThread();
+
+ // Check tail processing handlers and remove any handler if the tail
+ // has been processed.
+ void UpdateTailProcessingHandlers();
+
+  // For the sake of thread safety, we maintain a separate Vector of automatic
+  // pull nodes for rendering in |rendering_automatic_pull_nodes_|. It will be
+  // copied from |automatic_pull_nodes_| by UpdateAutomaticPullNodes() at the
+  // very start or end of the rendering quantum.
+ HashSet<AudioHandler*> automatic_pull_nodes_;
+ Vector<AudioHandler*> rendering_automatic_pull_nodes_;
+  // |automatic_pull_nodes_need_updating_| keeps track of whether
+  // |automatic_pull_nodes_| is modified.
+ bool automatic_pull_nodes_need_updating_;
+
+ // Collection of nodes where the channel count mode has changed. We want the
+ // channel count mode to change in the pre- or post-rendering phase so as
+ // not to disturb the running audio thread.
+ HashSet<AudioHandler*> deferred_count_mode_change_;
+
+ HashSet<AudioHandler*> deferred_channel_interpretation_change_;
+
+ // These two HashSet must be accessed only when the graph lock is held.
+ // These raw pointers are safe because their destructors unregister them.
+ HashSet<AudioSummingJunction*> dirty_summing_junctions_;
+ HashSet<AudioNodeOutput*> dirty_audio_node_outputs_;
+
+ // Only accessed in the audio thread.
+ Vector<AudioHandler*> deferred_break_connection_list_;
+
+ Vector<scoped_refptr<AudioHandler>> rendering_orphan_handlers_;
+ Vector<scoped_refptr<AudioHandler>> deletable_orphan_handlers_;
+
+  // Nodes that are processing their tails.
+ Vector<scoped_refptr<AudioHandler>> tail_processing_handlers_;
+ // Tail processing nodes that are now finished and want the output to be
+ // disabled. This is updated in the audio thread (with the graph lock). The
+ // main thread will disable the outputs.
+ Vector<scoped_refptr<AudioHandler>> finished_tail_processing_handlers_;
+
+ // Graph locking.
+ RecursiveMutex context_graph_mutex_;
+ volatile ThreadIdentifier audio_thread_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DEFERRED_TASK_HANDLER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc
new file mode 100644
index 00000000000..0d50d42071d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <algorithm>
+#include "third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
+ : AudioDelayDSPKernel(processor, AudioUtilities::kRenderQuantumFrames) {
+ DCHECK(processor);
+ DCHECK_GT(processor->SampleRate(), 0);
+ if (!(processor && processor->SampleRate() > 0))
+ return;
+
+ max_delay_time_ = processor->MaxDelayTime();
+ DCHECK_GE(max_delay_time_, 0);
+ DCHECK(!std::isnan(max_delay_time_));
+ if (max_delay_time_ < 0 || std::isnan(max_delay_time_))
+ return;
+
+ buffer_.Allocate(
+ BufferLengthForDelay(max_delay_time_, processor->SampleRate()));
+ buffer_.Zero();
+}
+
+bool DelayDSPKernel::HasSampleAccurateValues() {
+ return GetDelayProcessor()->DelayTime().HasSampleAccurateValues();
+}
+
+void DelayDSPKernel::CalculateSampleAccurateValues(float* delay_times,
+ size_t frames_to_process) {
+ GetDelayProcessor()->DelayTime().CalculateSampleAccurateValues(
+ delay_times, frames_to_process);
+}
+
+double DelayDSPKernel::DelayTime(float) {
+ return GetDelayProcessor()->DelayTime().FinalValue();
+}
+
+void DelayDSPKernel::ProcessOnlyAudioParams(size_t frames_to_process) {
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ float values[AudioUtilities::kRenderQuantumFrames];
+
+ GetDelayProcessor()->DelayTime().CalculateSampleAccurateValues(
+ values, frames_to_process);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h
new file mode 100644
index 00000000000..8ba48b472d5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_DSP_KERNEL_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_DSP_KERNEL_H_
+
+#include "third_party/blink/renderer/modules/webaudio/delay_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_delay_dsp_kernel.h"
+
+namespace blink {
+
+class DelayProcessor;
+
+class DelayDSPKernel final : public AudioDelayDSPKernel {
+ public:
+ explicit DelayDSPKernel(DelayProcessor*);
+
+ protected:
+ bool HasSampleAccurateValues() override;
+ void CalculateSampleAccurateValues(float* delay_times,
+ size_t frames_to_process) override;
+ double DelayTime(float sample_rate) override;
+
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+
+ private:
+ DelayProcessor* GetDelayProcessor() {
+ return static_cast<DelayProcessor*>(Processor());
+ }
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_DSP_KERNEL_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc
new file mode 100644
index 00000000000..be223789ead
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/delay_node.h"
+
+#include <memory>
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/delay_options.h"
+#include "third_party/blink/renderer/modules/webaudio/delay_processor.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+const double kMaximumAllowedDelayTime = 180;
+
+DelayHandler::DelayHandler(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& delay_time,
+ double max_delay_time)
+ : AudioBasicProcessorHandler(
+ kNodeTypeDelay,
+ node,
+ sample_rate,
+ std::make_unique<DelayProcessor>(sample_rate,
+ 1,
+ delay_time,
+ max_delay_time)) {
+ // Initialize the handler so that AudioParams can be processed.
+ Initialize();
+}
+
+scoped_refptr<DelayHandler> DelayHandler::Create(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& delay_time,
+ double max_delay_time) {
+ return base::AdoptRef(
+ new DelayHandler(node, sample_rate, delay_time, max_delay_time));
+}
+
+DelayNode::DelayNode(BaseAudioContext& context, double max_delay_time)
+ : AudioNode(context),
+ delay_time_(AudioParam::Create(context,
+ kParamTypeDelayDelayTime,
+ "Delay.delayTime",
+ 0.0,
+ 0.0,
+ max_delay_time)) {
+ SetHandler(DelayHandler::Create(*this, context.sampleRate(),
+ delay_time_->Handler(), max_delay_time));
+}
+
+DelayNode* DelayNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ // The default maximum delay time for the delay node is 1 sec.
+ return Create(context, 1, exception_state);
+}
+
+DelayNode* DelayNode::Create(BaseAudioContext& context,
+ double max_delay_time,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ if (max_delay_time <= 0 || max_delay_time >= kMaximumAllowedDelayTime) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexOutsideRange(
+ "max delay time", max_delay_time, 0.0,
+ ExceptionMessages::kExclusiveBound, kMaximumAllowedDelayTime,
+ ExceptionMessages::kExclusiveBound));
+ return nullptr;
+ }
+
+ return new DelayNode(context, max_delay_time);
+}
+
+DelayNode* DelayNode::Create(BaseAudioContext* context,
+ const DelayOptions& options,
+ ExceptionState& exception_state) {
+ // maxDelayTime has a default value specified.
+ DelayNode* node = Create(*context, options.maxDelayTime(), exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->delayTime()->setValue(options.delayTime());
+
+ return node;
+}
+
+AudioParam* DelayNode::delayTime() {
+ return delay_time_;
+}
+
+void DelayNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(delay_time_);
+ AudioNode::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h
new file mode 100644
index 00000000000..3470a236aab
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_NODE_H_
+
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+
+namespace blink {
+
+class AudioParamHandler;
+class BaseAudioContext;
+class DelayOptions;
+class ExceptionState;
+
+class DelayHandler : public AudioBasicProcessorHandler {
+ public:
+ static scoped_refptr<DelayHandler> Create(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& delay_time,
+ double max_delay_time);
+
+ private:
+ DelayHandler(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& delay_time,
+ double max_delay_time);
+};
+
+class DelayNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static DelayNode* Create(BaseAudioContext&, ExceptionState&);
+ static DelayNode* Create(BaseAudioContext&,
+ double max_delay_time,
+ ExceptionState&);
+ static DelayNode* Create(BaseAudioContext*,
+ const DelayOptions&,
+ ExceptionState&);
+ virtual void Trace(blink::Visitor*);
+ AudioParam* delayTime();
+
+ private:
+ DelayNode(BaseAudioContext&, double max_delay_time);
+
+ Member<AudioParam> delay_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.idl
new file mode 100644
index 00000000000..cc9d24c7642
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_node.idl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#DelayNode
+[
+ Constructor(BaseAudioContext context, optional DelayOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface DelayNode : AudioNode {
+ readonly attribute AudioParam delayTime;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/delay_options.idl
new file mode 100644
index 00000000000..d2c89651f66
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_options.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-delayoptions
+dictionary DelayOptions : AudioNodeOptions {
+ double maxDelayTime = 1;
+ double delayTime = 0;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc
new file mode 100644
index 00000000000..491c68315ea
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/delay_dsp_kernel.h"
+#include "third_party/blink/renderer/modules/webaudio/delay_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+
+namespace blink {
+
+DelayProcessor::DelayProcessor(float sample_rate,
+ unsigned number_of_channels,
+ AudioParamHandler& delay_time,
+ double max_delay_time)
+ : AudioDSPKernelProcessor(sample_rate, number_of_channels),
+ delay_time_(&delay_time),
+ max_delay_time_(max_delay_time) {}
+
+DelayProcessor::~DelayProcessor() {
+ if (IsInitialized())
+ Uninitialize();
+}
+
+std::unique_ptr<AudioDSPKernel> DelayProcessor::CreateKernel() {
+ return std::make_unique<DelayDSPKernel>(this);
+}
+
+void DelayProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ float values[AudioUtilities::kRenderQuantumFrames];
+
+ delay_time_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h
new file mode 100644
index 00000000000..e4ab174a13e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/delay_processor.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_PROCESSOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_PROCESSOR_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel_processor.h"
+
+namespace blink {
+
+class AudioDSPKernel;
+
+class DelayProcessor final : public AudioDSPKernelProcessor {
+ public:
+ DelayProcessor(float sample_rate,
+ unsigned number_of_channels,
+ AudioParamHandler& delay_time,
+ double max_delay_time);
+ ~DelayProcessor() override;
+
+ std::unique_ptr<AudioDSPKernel> CreateKernel() override;
+
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+
+ AudioParamHandler& DelayTime() const { return *delay_time_; }
+ double MaxDelayTime() { return max_delay_time_; }
+
+ private:
+ scoped_refptr<AudioParamHandler> delay_time_;
+ double max_delay_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DELAY_PROCESSOR_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc
new file mode 100644
index 00000000000..31e811c5372
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h"
+#include "third_party/blink/renderer/modules/webaudio/dynamics_compressor_options.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/dynamics_compressor.h"
+
+// Set output to stereo by default.
+static const unsigned defaultNumberOfOutputChannels = 2;
+
+namespace blink {
+
+DynamicsCompressorHandler::DynamicsCompressorHandler(
+ AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& threshold,
+ AudioParamHandler& knee,
+ AudioParamHandler& ratio,
+ AudioParamHandler& attack,
+ AudioParamHandler& release)
+ : AudioHandler(kNodeTypeDynamicsCompressor, node, sample_rate),
+ threshold_(&threshold),
+ knee_(&knee),
+ ratio_(&ratio),
+ reduction_(0),
+ attack_(&attack),
+ release_(&release) {
+ AddInput();
+ AddOutput(defaultNumberOfOutputChannels);
+ Initialize();
+}
+
+scoped_refptr<DynamicsCompressorHandler> DynamicsCompressorHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& threshold,
+ AudioParamHandler& knee,
+ AudioParamHandler& ratio,
+ AudioParamHandler& attack,
+ AudioParamHandler& release) {
+ return base::AdoptRef(new DynamicsCompressorHandler(
+ node, sample_rate, threshold, knee, ratio, attack, release));
+}
+
+DynamicsCompressorHandler::~DynamicsCompressorHandler() {
+ Uninitialize();
+}
+
+void DynamicsCompressorHandler::Process(size_t frames_to_process) {
+ AudioBus* output_bus = Output(0).Bus();
+ DCHECK(output_bus);
+
+ float threshold = threshold_->Value();
+ float knee = knee_->Value();
+ float ratio = ratio_->Value();
+ float attack = attack_->Value();
+ float release = release_->Value();
+
+ dynamics_compressor_->SetParameterValue(DynamicsCompressor::kParamThreshold,
+ threshold);
+ dynamics_compressor_->SetParameterValue(DynamicsCompressor::kParamKnee, knee);
+ dynamics_compressor_->SetParameterValue(DynamicsCompressor::kParamRatio,
+ ratio);
+ dynamics_compressor_->SetParameterValue(DynamicsCompressor::kParamAttack,
+ attack);
+ dynamics_compressor_->SetParameterValue(DynamicsCompressor::kParamRelease,
+ release);
+
+ dynamics_compressor_->Process(Input(0).Bus(), output_bus, frames_to_process);
+
+ reduction_ =
+ dynamics_compressor_->ParameterValue(DynamicsCompressor::kParamReduction);
+}
+
+void DynamicsCompressorHandler::ProcessOnlyAudioParams(
+ size_t frames_to_process) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ float values[AudioUtilities::kRenderQuantumFrames];
+
+ threshold_->CalculateSampleAccurateValues(values, frames_to_process);
+ knee_->CalculateSampleAccurateValues(values, frames_to_process);
+ ratio_->CalculateSampleAccurateValues(values, frames_to_process);
+ attack_->CalculateSampleAccurateValues(values, frames_to_process);
+ release_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+void DynamicsCompressorHandler::Initialize() {
+ if (IsInitialized())
+ return;
+
+ AudioHandler::Initialize();
+ dynamics_compressor_ = std::make_unique<DynamicsCompressor>(
+ Context()->sampleRate(), defaultNumberOfOutputChannels);
+}
+
+bool DynamicsCompressorHandler::RequiresTailProcessing() const {
+ // Always return true even if the tail time and latency might both be zero.
+ return true;
+}
+
+double DynamicsCompressorHandler::TailTime() const {
+ return dynamics_compressor_->TailTime();
+}
+
+double DynamicsCompressorHandler::LatencyTime() const {
+ return dynamics_compressor_->LatencyTime();
+}
+
+void DynamicsCompressorHandler::SetChannelCount(
+ unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // A DynamicsCompressorNode only supports 1 or 2 channels
+ if (channel_count > 0 && channel_count <= 2) {
+ if (channel_count_ != channel_count) {
+ channel_count_ = channel_count;
+ if (InternalChannelCountMode() != kMax)
+ UpdateChannelsForInputs();
+ }
+ } else {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned long>(
+ "channelCount", channel_count, 1,
+ ExceptionMessages::kInclusiveBound, 2,
+ ExceptionMessages::kInclusiveBound));
+ }
+}
+
+void DynamicsCompressorHandler::SetChannelCountMode(
+ const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ ChannelCountMode old_mode = InternalChannelCountMode();
+
+ if (mode == "clamped-max") {
+ new_channel_count_mode_ = kClampedMax;
+ } else if (mode == "explicit") {
+ new_channel_count_mode_ = kExplicit;
+ } else if (mode == "max") {
+ // This is not supported for a DynamicsCompressorNode, which can
+ // only handle 1 or 2 channels.
+ exception_state.ThrowDOMException(kNotSupportedError,
+ "The provided value 'max' is not an "
+ "allowed value for ChannelCountMode");
+ new_channel_count_mode_ = old_mode;
+ } else {
+ // Do nothing for other invalid values.
+ new_channel_count_mode_ = old_mode;
+ }
+
+ if (new_channel_count_mode_ != old_mode)
+ Context()->GetDeferredTaskHandler().AddChangedChannelCountMode(this);
+}
+// ----------------------------------------------------------------
+
+DynamicsCompressorNode::DynamicsCompressorNode(BaseAudioContext& context)
+ : AudioNode(context),
+ threshold_(AudioParam::Create(context,
+ kParamTypeDynamicsCompressorThreshold,
+ "DynamicsCompressor.threshold",
+ -24,
+ -100,
+ 0)),
+ knee_(AudioParam::Create(context,
+ kParamTypeDynamicsCompressorKnee,
+ "DynamicsCompressor.knee",
+ 30,
+ 0,
+ 40)),
+ ratio_(AudioParam::Create(context,
+ kParamTypeDynamicsCompressorRatio,
+ "DynamicsCompressor.ratio",
+ 12,
+ 1,
+ 20)),
+ attack_(AudioParam::Create(context,
+ kParamTypeDynamicsCompressorAttack,
+ "DynamicsCompressor.attack",
+ 0.003,
+ 0,
+ 1)),
+ release_(AudioParam::Create(context,
+ kParamTypeDynamicsCompressorRelease,
+ "DynamicsCompressor.release",
+ 0.250,
+ 0,
+ 1)) {
+ SetHandler(DynamicsCompressorHandler::Create(
+ *this, context.sampleRate(), threshold_->Handler(), knee_->Handler(),
+ ratio_->Handler(), attack_->Handler(), release_->Handler()));
+}
+
+DynamicsCompressorNode* DynamicsCompressorNode::Create(
+ BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new DynamicsCompressorNode(context);
+}
+
+DynamicsCompressorNode* DynamicsCompressorNode::Create(
+ BaseAudioContext* context,
+ const DynamicsCompressorOptions& options,
+ ExceptionState& exception_state) {
+ DynamicsCompressorNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->attack()->setValue(options.attack());
+ node->knee()->setValue(options.knee());
+ node->ratio()->setValue(options.ratio());
+ node->release()->setValue(options.release());
+ node->threshold()->setValue(options.threshold());
+
+ return node;
+}
+
+void DynamicsCompressorNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(threshold_);
+ visitor->Trace(knee_);
+ visitor->Trace(ratio_);
+ visitor->Trace(attack_);
+ visitor->Trace(release_);
+ AudioNode::Trace(visitor);
+}
+
+DynamicsCompressorHandler&
+DynamicsCompressorNode::GetDynamicsCompressorHandler() const {
+ return static_cast<DynamicsCompressorHandler&>(Handler());
+}
+
+AudioParam* DynamicsCompressorNode::threshold() const {
+ return threshold_;
+}
+
+AudioParam* DynamicsCompressorNode::knee() const {
+ return knee_;
+}
+
+AudioParam* DynamicsCompressorNode::ratio() const {
+ return ratio_;
+}
+
+float DynamicsCompressorNode::reduction() const {
+ return GetDynamicsCompressorHandler().ReductionValue();
+}
+
+AudioParam* DynamicsCompressorNode::attack() const {
+ return attack_;
+}
+
+AudioParam* DynamicsCompressorNode::release() const {
+ return release_;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h
new file mode 100644
index 00000000000..3f5a4b1075f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DYNAMICS_COMPRESSOR_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DYNAMICS_COMPRESSOR_NODE_H_
+
+#include <memory>
+#include "base/gtest_prod_util.h"
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class DynamicsCompressor;
+class DynamicsCompressorOptions;
+
+class MODULES_EXPORT DynamicsCompressorHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<DynamicsCompressorHandler> Create(
+ AudioNode&,
+ float sample_rate,
+ AudioParamHandler& threshold,
+ AudioParamHandler& knee,
+ AudioParamHandler& ratio,
+ AudioParamHandler& attack,
+ AudioParamHandler& release);
+
+ ~DynamicsCompressorHandler();
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void Initialize() override;
+
+ float ReductionValue() const { return reduction_; }
+
+ void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCountMode(const String&, ExceptionState&) final;
+
+ private:
+ DynamicsCompressorHandler(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& threshold,
+ AudioParamHandler& knee,
+ AudioParamHandler& ratio,
+ AudioParamHandler& attack,
+ AudioParamHandler& release);
+ bool RequiresTailProcessing() const final;
+ double TailTime() const override;
+ double LatencyTime() const override;
+
+ std::unique_ptr<DynamicsCompressor> dynamics_compressor_;
+ scoped_refptr<AudioParamHandler> threshold_;
+ scoped_refptr<AudioParamHandler> knee_;
+ scoped_refptr<AudioParamHandler> ratio_;
+ float reduction_;
+ scoped_refptr<AudioParamHandler> attack_;
+ scoped_refptr<AudioParamHandler> release_;
+
+ FRIEND_TEST_ALL_PREFIXES(DynamicsCompressorNodeTest, ProcessorLifetime);
+};
+
+class MODULES_EXPORT DynamicsCompressorNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static DynamicsCompressorNode* Create(BaseAudioContext&, ExceptionState&);
+ static DynamicsCompressorNode* Create(BaseAudioContext*,
+ const DynamicsCompressorOptions&,
+ ExceptionState&);
+ virtual void Trace(blink::Visitor*);
+
+ AudioParam* threshold() const;
+ AudioParam* knee() const;
+ AudioParam* ratio() const;
+ float reduction() const;
+ AudioParam* attack() const;
+ AudioParam* release() const;
+
+ private:
+ DynamicsCompressorNode(BaseAudioContext&);
+ DynamicsCompressorHandler& GetDynamicsCompressorHandler() const;
+
+ Member<AudioParam> threshold_;
+ Member<AudioParam> knee_;
+ Member<AudioParam> ratio_;
+ Member<AudioParam> attack_;
+ Member<AudioParam> release_;
+
+ FRIEND_TEST_ALL_PREFIXES(DynamicsCompressorNodeTest, ProcessorLifetime);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_DYNAMICS_COMPRESSOR_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.idl
new file mode 100644
index 00000000000..41c9008b009
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.idl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#dynamicscompressornode
+[
+ Constructor(BaseAudioContext context, optional DynamicsCompressorOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface DynamicsCompressorNode : AudioNode {
+ readonly attribute AudioParam threshold; // in Decibels
+ readonly attribute AudioParam knee; // in Decibels
+ readonly attribute AudioParam ratio; // unit-less
+ readonly attribute float reduction; // in Decibels
+ readonly attribute AudioParam attack; // in Seconds
+ readonly attribute AudioParam release; // in Seconds
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node_test.cc
new file mode 100644
index 00000000000..30590d71b18
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_node_test.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/dynamics_compressor_node.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+
+namespace blink {
+
+TEST(DynamicsCompressorNodeTest, ProcessorLifetime) {
+ std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
+ OfflineAudioContext* context = OfflineAudioContext::Create(
+ &page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
+ DynamicsCompressorNode* node =
+ context->createDynamicsCompressor(ASSERT_NO_EXCEPTION);
+ DynamicsCompressorHandler& handler = node->GetDynamicsCompressorHandler();
+ EXPECT_TRUE(handler.dynamics_compressor_);
+ BaseAudioContext::GraphAutoLocker locker(context);
+ handler.Dispose();
+ // m_dynamicsCompressor should live after dispose() because an audio thread
+ // is using it.
+ EXPECT_TRUE(handler.dynamics_compressor_);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_options.idl
new file mode 100644
index 00000000000..0bdd0ce6597
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/dynamics_compressor_options.idl
@@ -0,0 +1,12 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-dynamicscompressoroptions
+dictionary DynamicsCompressorOptions : AudioNodeOptions {
+ float attack = 0.003;
+ float knee = 30;
+ float ratio = 12;
+ float release = 0.25;
+ float threshold = -24;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc
new file mode 100644
index 00000000000..c75e03c83ea
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/gain_node.h"
+#include "third_party/blink/renderer/modules/webaudio/gain_options.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+
+namespace blink {
+
+GainHandler::GainHandler(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& gain)
+ : AudioHandler(kNodeTypeGain, node, sample_rate),
+ gain_(&gain),
+ sample_accurate_gain_values_(
+ AudioUtilities::kRenderQuantumFrames) // FIXME: can probably
+ // share temp buffer
+ // in context
+{
+ AddInput();
+ AddOutput(1);
+
+ Initialize();
+}
+
+scoped_refptr<GainHandler> GainHandler::Create(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& gain) {
+ return base::AdoptRef(new GainHandler(node, sample_rate, gain));
+}
+
+void GainHandler::Process(size_t frames_to_process) {
+ // FIXME: for some cases there is a nice optimization to avoid processing
+ // here, and let the gain change happen in the summing junction input of the
+ // AudioNode we're connected to. Then we can avoid all of the following:
+
+ AudioBus* output_bus = Output(0).Bus();
+ DCHECK(output_bus);
+
+ if (!IsInitialized() || !Input(0).IsConnected()) {
+ output_bus->Zero();
+ } else {
+ AudioBus* input_bus = Input(0).Bus();
+
+ if (gain_->HasSampleAccurateValues()) {
+ // Apply sample-accurate gain scaling for precise envelopes, grain
+ // windows, etc.
+ DCHECK_LE(frames_to_process, sample_accurate_gain_values_.size());
+ if (frames_to_process <= sample_accurate_gain_values_.size()) {
+ float* gain_values = sample_accurate_gain_values_.Data();
+ gain_->CalculateSampleAccurateValues(gain_values, frames_to_process);
+ output_bus->CopyWithSampleAccurateGainValuesFrom(
+ *input_bus, gain_values, frames_to_process);
+ }
+ } else {
+ // Apply the gain.
+ if (gain_->Value() == 0) {
+ // If the gain is 0, just zero the bus and set the silence hint.
+ output_bus->Zero();
+ } else {
+ output_bus->CopyWithGainFrom(*input_bus, gain_->Value());
+ }
+ }
+ }
+}
+
+void GainHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ float values[AudioUtilities::kRenderQuantumFrames];
+
+ gain_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+// FIXME: this can go away when we do mixing with gain directly in summing
+// junction of AudioNodeInput
+//
+// As soon as we know the channel count of our input, we can lazily initialize.
+// Sometimes this may be called more than once with different channel counts, in
+// which case we must safely uninitialize and then re-initialize with the new
+// channel count.
+void GainHandler::CheckNumberOfChannelsForInput(AudioNodeInput* input) {
+ DCHECK(Context()->IsAudioThread());
+ DCHECK(Context()->IsGraphOwner());
+
+ DCHECK(input);
+ DCHECK_EQ(input, &this->Input(0));
+ if (input != &this->Input(0))
+ return;
+
+ unsigned number_of_channels = input->NumberOfChannels();
+
+ if (IsInitialized() && number_of_channels != Output(0).NumberOfChannels()) {
+ // We're already initialized but the channel count has changed.
+ Uninitialize();
+ }
+
+ if (!IsInitialized()) {
+ // This will propagate the channel count to any nodes connected further
+ // downstream in the graph.
+ Output(0).SetNumberOfChannels(number_of_channels);
+ Initialize();
+ }
+
+ AudioHandler::CheckNumberOfChannelsForInput(input);
+}
+
+// ----------------------------------------------------------------
+
+GainNode::GainNode(BaseAudioContext& context)
+ : AudioNode(context),
+ gain_(AudioParam::Create(context, kParamTypeGainGain, "Gain.gain", 1.0)) {
+ SetHandler(
+ GainHandler::Create(*this, context.sampleRate(), gain_->Handler()));
+}
+
+GainNode* GainNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new GainNode(context);
+}
+
+GainNode* GainNode::Create(BaseAudioContext* context,
+ const GainOptions& options,
+ ExceptionState& exception_state) {
+ GainNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->gain()->setValue(options.gain());
+
+ return node;
+}
+
+AudioParam* GainNode::gain() const {
+ return gain_;
+}
+
+void GainNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(gain_);
+ AudioNode::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h
new file mode 100644
index 00000000000..9890748b080
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_GAIN_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_GAIN_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class GainOptions;
+
+// GainNode is an AudioNode with one input and one output which applies a gain
+// (volume) change to the audio signal. De-zippering (smoothing) is applied
+// when the gain value is changed dynamically.
+
+class GainHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<GainHandler> Create(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& gain);
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+
+ // Called in the main thread when the number of channels for the input may
+ // have changed.
+ void CheckNumberOfChannelsForInput(AudioNodeInput*) override;
+
+ // AudioNode
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ GainHandler(AudioNode&, float sample_rate, AudioParamHandler& gain);
+
+ scoped_refptr<AudioParamHandler> gain_;
+
+ AudioFloatArray sample_accurate_gain_values_;
+};
+
+class GainNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static GainNode* Create(BaseAudioContext&, ExceptionState&);
+ static GainNode* Create(BaseAudioContext*,
+ const GainOptions&,
+ ExceptionState&);
+ virtual void Trace(blink::Visitor*);
+
+ AudioParam* gain() const;
+
+ private:
+ GainNode(BaseAudioContext&);
+
+ Member<AudioParam> gain_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_GAIN_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.idl
new file mode 100644
index 00000000000..4eb0275e7fc
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_node.idl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#gainnode
+[
+ Constructor(BaseAudioContext context, optional GainOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface GainNode : AudioNode {
+ // FIXME: eventually it will be interesting to remove the readonly restriction, but need to properly deal with thread safety here.
+ readonly attribute AudioParam gain;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/gain_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/gain_options.idl
new file mode 100644
index 00000000000..6a6770657cb
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/gain_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-gainoptions
+dictionary GainOptions : AudioNodeOptions {
+ float gain = 1;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc
new file mode 100644
index 00000000000..b6d937a9f24
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.cc
@@ -0,0 +1,243 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/iir_filter_node.h"
+
+#include <memory>
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/iir_filter_options.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+
+namespace blink {
+
+IIRFilterHandler::IIRFilterHandler(AudioNode& node,
+ float sample_rate,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef)
+ : AudioBasicProcessorHandler(
+ kNodeTypeIIRFilter,
+ node,
+ sample_rate,
+ std::make_unique<IIRProcessor>(sample_rate,
+ 1,
+ feedforward_coef,
+ feedback_coef)) {}
+
+scoped_refptr<IIRFilterHandler> IIRFilterHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef) {
+ return base::AdoptRef(
+ new IIRFilterHandler(node, sample_rate, feedforward_coef, feedback_coef));
+}
+
+// Determine if filter is stable based on the feedback coefficients.
+// We compute the reflection coefficients for the filter. If, at any
+// point, the magnitude of the reflection coefficient is greater than
+// or equal to 1, the filter is declared unstable.
+//
+// Let A(z) be the feedback polynomial given by
+// A[n](z) = 1 + a[1]/z + a[2]/z^2 + ... + a[n]/z^n
+//
+// The first reflection coefficient k[n] = a[n]. Then, recursively compute
+//
+// A[n-1](z) = (A[n](z) - k[n]*A[n](1/z)/z^n)/(1-k[n]^2);
+//
+// stopping at A[1](z). If at any point |k[n]| >= 1, the filter is
+// unstable.
+static bool IsFilterStable(const Vector<double>& feedback_coef) {
+ // Make a copy of the feedback coefficients
+ Vector<double> coef(feedback_coef);
+ int order = coef.size() - 1;
+
+ // If necessary, normalize filter coefficients so that constant term is 1.
+ if (coef[0] != 1) {
+ for (int m = 1; m <= order; ++m)
+ coef[m] /= coef[0];
+ coef[0] = 1;
+ }
+
+ // Begin recursion, using a work array to hold intermediate results.
+ Vector<double> work(order + 1);
+ for (int n = order; n >= 1; --n) {
+ double k = coef[n];
+
+ if (std::fabs(k) >= 1)
+ return false;
+
+ // Note that A[n](1/z)/z^n is basically the coefficients of A[n]
+ // in reverse order.
+ double factor = 1 - k * k;
+ for (int m = 0; m <= n; ++m)
+ work[m] = (coef[m] - k * coef[n - m]) / factor;
+ coef.swap(work);
+ }
+
+ return true;
+}
+
+IIRFilterNode::IIRFilterNode(BaseAudioContext& context,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef)
+ : AudioNode(context) {
+ SetHandler(IIRFilterHandler::Create(*this, context.sampleRate(),
+ feedforward_coef, feedback_coef));
+
+ // Histogram of the IIRFilter order. createIIRFilter ensures that the length
+ // of |feedbackCoef| is in the range [1, IIRFilter::kMaxOrder + 1]. The order
+ // is one less than the length of this vector.
+ DEFINE_STATIC_LOCAL(SparseHistogram, filter_order_histogram,
+ ("WebAudio.IIRFilterNode.Order"));
+
+ filter_order_histogram.Sample(feedback_coef.size() - 1);
+}
+
+IIRFilterNode* IIRFilterNode::Create(BaseAudioContext& context,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ if (feedback_coef.size() == 0 ||
+ (feedback_coef.size() > IIRFilter::kMaxOrder + 1)) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexOutsideRange<size_t>(
+ "number of feedback coefficients", feedback_coef.size(), 1,
+ ExceptionMessages::kInclusiveBound, IIRFilter::kMaxOrder + 1,
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ if (feedforward_coef.size() == 0 ||
+ (feedforward_coef.size() > IIRFilter::kMaxOrder + 1)) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexOutsideRange<size_t>(
+ "number of feedforward coefficients", feedforward_coef.size(), 1,
+ ExceptionMessages::kInclusiveBound, IIRFilter::kMaxOrder + 1,
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ if (feedback_coef[0] == 0) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError, "First feedback coefficient cannot be zero.");
+ return nullptr;
+ }
+
+ bool has_non_zero_coef = false;
+
+ for (size_t k = 0; k < feedforward_coef.size(); ++k) {
+ if (feedforward_coef[k] != 0) {
+ has_non_zero_coef = true;
+ break;
+ }
+ }
+
+ if (!has_non_zero_coef) {
+ exception_state.ThrowDOMException(
+ kInvalidStateError,
+ "At least one feedforward coefficient must be non-zero.");
+ return nullptr;
+ }
+
+ if (!IsFilterStable(feedback_coef)) {
+ StringBuilder message;
+ message.Append("Unstable IIRFilter with feedback coefficients: [");
+ message.AppendNumber(feedback_coef[0]);
+ for (size_t k = 1; k < feedback_coef.size(); ++k) {
+ message.Append(", ");
+ message.AppendNumber(feedback_coef[k]);
+ }
+ message.Append(']');
+
+ context.GetExecutionContext()->AddConsoleMessage(ConsoleMessage::Create(
+ kJSMessageSource, kWarningMessageLevel, message.ToString()));
+ }
+
+ return new IIRFilterNode(context, feedforward_coef, feedback_coef);
+}
+
+IIRFilterNode* IIRFilterNode::Create(BaseAudioContext* context,
+ const IIRFilterOptions& options,
+ ExceptionState& exception_state) {
+ if (!options.hasFeedforward()) {
+ exception_state.ThrowDOMException(
+ kNotFoundError, "IIRFilterOptions: feedforward is required.");
+ return nullptr;
+ }
+
+ if (!options.hasFeedback()) {
+ exception_state.ThrowDOMException(
+ kNotFoundError, "IIRFilterOptions: feedback is required.");
+ return nullptr;
+ }
+
+ IIRFilterNode* node = Create(*context, options.feedforward(),
+ options.feedback(), exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ return node;
+}
+
+void IIRFilterNode::Trace(blink::Visitor* visitor) {
+ AudioNode::Trace(visitor);
+}
+
+IIRProcessor* IIRFilterNode::GetIIRFilterProcessor() const {
+ return static_cast<IIRProcessor*>(
+ static_cast<IIRFilterHandler&>(Handler()).Processor());
+}
+
+void IIRFilterNode::getFrequencyResponse(
+ NotShared<const DOMFloat32Array> frequency_hz,
+ NotShared<DOMFloat32Array> mag_response,
+ NotShared<DOMFloat32Array> phase_response,
+ ExceptionState& exception_state) {
+ unsigned frequency_hz_length = frequency_hz.View()->length();
+
+ // All the arrays must have the same length. Just verify that all
+ // the arrays have the same length as the |frequency_hz| array.
+ if (mag_response.View()->length() != frequency_hz_length) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ ExceptionMessages::IndexOutsideRange(
+ "magResponse length", mag_response.View()->length(),
+ frequency_hz_length, ExceptionMessages::kInclusiveBound,
+ frequency_hz_length, ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ if (phase_response.View()->length() != frequency_hz_length) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ ExceptionMessages::IndexOutsideRange(
+ "phaseResponse length", phase_response.View()->length(),
+ frequency_hz_length, ExceptionMessages::kInclusiveBound,
+ frequency_hz_length, ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ GetIIRFilterProcessor()->GetFrequencyResponse(
+ frequency_hz_length, frequency_hz.View()->Data(),
+ mag_response.View()->Data(), phase_response.View()->Data());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h
new file mode 100644
index 00000000000..67b1692e36b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.h
@@ -0,0 +1,67 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_FILTER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_FILTER_NODE_H_
+
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/iir_processor.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+class IIRFilterOptions;
+
+class IIRFilterHandler : public AudioBasicProcessorHandler {
+ public:
+ static scoped_refptr<IIRFilterHandler> Create(
+ AudioNode&,
+ float sample_rate,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef);
+
+ private:
+ IIRFilterHandler(AudioNode&,
+ float sample_rate,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef);
+};
+
+class IIRFilterNode : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static IIRFilterNode* Create(BaseAudioContext&,
+ const Vector<double>& feedforward,
+ const Vector<double>& feedback,
+ ExceptionState&);
+
+ static IIRFilterNode* Create(BaseAudioContext*,
+ const IIRFilterOptions&,
+ ExceptionState&);
+
+ virtual void Trace(blink::Visitor*);
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void getFrequencyResponse(NotShared<const DOMFloat32Array> frequency_hz,
+ NotShared<DOMFloat32Array> mag_response,
+ NotShared<DOMFloat32Array> phase_response,
+ ExceptionState&);
+
+ private:
+ IIRFilterNode(BaseAudioContext&,
+ const Vector<double>& denominator,
+ const Vector<double>& numerator);
+
+ IIRProcessor* GetIIRFilterProcessor() const;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_FILTER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.idl
new file mode 100644
index 00000000000..b98d3196a6f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_node.idl
@@ -0,0 +1,15 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#iirfilternode
+[
+ Constructor(BaseAudioContext context, IIRFilterOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface IIRFilterNode : AudioNode {
+ [RaisesException] void getFrequencyResponse(Float32Array frequencyHz,
+ Float32Array magResponse,
+ Float32Array phaseResponse);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_options.idl
new file mode 100644
index 00000000000..22804d8917d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_filter_options.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-iirfilteroptions
+dictionary IIRFilterOptions : AudioNodeOptions {
+ required sequence<double> feedforward;
+ required sequence<double> feedback;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc
new file mode 100644
index 00000000000..b748c3a61be
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/iir_processor.h"
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h"
+
+namespace blink {
+
+IIRProcessor::IIRProcessor(float sample_rate,
+ size_t number_of_channels,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef)
+ : AudioDSPKernelProcessor(sample_rate, number_of_channels) {
+ unsigned feedback_length = feedback_coef.size();
+ unsigned feedforward_length = feedforward_coef.size();
+ DCHECK_GT(feedback_length, 0u);
+ DCHECK_GT(feedforward_length, 0u);
+
+ feedforward_.Allocate(feedforward_length);
+ feedback_.Allocate(feedback_length);
+ feedforward_.CopyToRange(feedforward_coef.data(), 0, feedforward_length);
+ feedback_.CopyToRange(feedback_coef.data(), 0, feedback_length);
+
+ // Need to scale the feedback and feedforward coefficients appropriately.
+ // (It's up to the caller to ensure feedbackCoef[0] is not 0.)
+ DCHECK_NE(feedback_coef[0], 0);
+
+ if (feedback_coef[0] != 1) {
+ // The provided filter is:
+ //
+ // a[0]*y(n) + a[1]*y(n-1) + ... = b[0]*x(n) + b[1]*x(n-1) + ...
+ //
+ // We want the leading coefficient of y(n) to be 1:
+ //
+ // y(n) + a[1]/a[0]*y(n-1) + ... = b[0]/a[0]*x(n) + b[1]/a[0]*x(n-1) + ...
+ //
+ // Thus, the feedback and feedforward coefficients need to be scaled by
+ // 1/a[0].
+ float scale = feedback_coef[0];
+ for (unsigned k = 1; k < feedback_length; ++k)
+ feedback_[k] /= scale;
+
+ for (unsigned k = 0; k < feedforward_length; ++k)
+ feedforward_[k] /= scale;
+
+ // The IIRFilter checks to make sure this coefficient is 1, so make it so.
+ feedback_[0] = 1;
+ }
+
+ response_kernel_ = std::make_unique<IIRDSPKernel>(this);
+}
+
+IIRProcessor::~IIRProcessor() {
+ if (IsInitialized())
+ Uninitialize();
+}
+
+std::unique_ptr<AudioDSPKernel> IIRProcessor::CreateKernel() {
+ return std::make_unique<IIRDSPKernel>(this);
+}
+
+void IIRProcessor::Process(const AudioBus* source,
+ AudioBus* destination,
+ size_t frames_to_process) {
+ if (!IsInitialized()) {
+ destination->Zero();
+ return;
+ }
+
+ // For each channel of our input, process using the corresponding IIRDSPKernel
+ // into the output channel.
+ for (unsigned i = 0; i < kernels_.size(); ++i)
+ kernels_[i]->Process(source->Channel(i)->Data(),
+ destination->Channel(i)->MutableData(),
+ frames_to_process);
+}
+
+void IIRProcessor::GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response) {
+ response_kernel_->GetFrequencyResponse(n_frequencies, frequency_hz,
+ mag_response, phase_response);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h
new file mode 100644
index 00000000000..68b7dae46e0
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iir_processor.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_PROCESSOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_PROCESSOR_H_
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel_processor.h"
+#include "third_party/blink/renderer/platform/audio/iir_filter.h"
+
+namespace blink {
+
+class IIRDSPKernel;
+
+class IIRProcessor final : public AudioDSPKernelProcessor {
+ public:
+ IIRProcessor(float sample_rate,
+ size_t number_of_channels,
+ const Vector<double>& feedforward_coef,
+ const Vector<double>& feedback_coef);
+ ~IIRProcessor() override;
+
+ std::unique_ptr<AudioDSPKernel> CreateKernel() override;
+
+ void Process(const AudioBus* source,
+ AudioBus* destination,
+ size_t frames_to_process) override;
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response);
+
+ AudioDoubleArray* Feedback() { return &feedback_; }
+ AudioDoubleArray* Feedforward() { return &feedforward_; }
+
+ private:
+ // The feedback and feedforward filter coefficients for the IIR filter.
+ AudioDoubleArray feedback_;
+ AudioDoubleArray feedforward_;
+ // This holds the IIR kernel for computing the frequency response.
+ std::unique_ptr<IIRDSPKernel> response_kernel_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIR_PROCESSOR_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc
new file mode 100644
index 00000000000..f6be62ceb22
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.cc
@@ -0,0 +1,62 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h"
+
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+IIRDSPKernel::IIRDSPKernel(IIRProcessor* processor)
+ : AudioDSPKernel(processor),
+ iir_(processor->Feedforward(), processor->Feedback()) {
+ tail_time_ = iir_.TailTime(processor->SampleRate());
+}
+
+void IIRDSPKernel::Process(const float* source,
+ float* destination,
+ size_t frames_to_process) {
+ DCHECK(source);
+ DCHECK(destination);
+
+ iir_.Process(source, destination, frames_to_process);
+}
+
+void IIRDSPKernel::GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response) {
+ bool is_good =
+ n_frequencies > 0 && frequency_hz && mag_response && phase_response;
+ DCHECK(is_good);
+ if (!is_good)
+ return;
+
+ Vector<float> frequency(n_frequencies);
+
+ double nyquist = this->Nyquist();
+
+ // Convert from frequency in Hz to normalized frequency (0 -> 1),
+ // with 1 equal to the Nyquist frequency.
+ for (int k = 0; k < n_frequencies; ++k)
+ frequency[k] = frequency_hz[k] / nyquist;
+
+ iir_.GetFrequencyResponse(n_frequencies, frequency.data(), mag_response,
+ phase_response);
+}
+
+bool IIRDSPKernel::RequiresTailProcessing() const {
+ // Always return true even if the tail time and latency might both be zero.
+ return true;
+}
+
+double IIRDSPKernel::TailTime() const {
+ return tail_time_;
+}
+
+double IIRDSPKernel::LatencyTime() const {
+ return 0;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h
new file mode 100644
index 00000000000..bdf62ff185c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/iirdsp_kernel.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIRDSP_KERNEL_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIRDSP_KERNEL_H_
+
+#include "third_party/blink/renderer/modules/webaudio/iir_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/iir_filter.h"
+
+namespace blink {
+
+class IIRProcessor;
+
+class IIRDSPKernel final : public AudioDSPKernel {
+ public:
+ explicit IIRDSPKernel(IIRProcessor*);
+
+ // AudioDSPKernel
+ void Process(const float* source,
+ float* dest,
+ size_t frames_to_process) override;
+ void Reset() override { iir_.Reset(); }
+
+ // Get the magnitude and phase response of the filter at the given
+ // set of frequencies (in Hz). The phase response is in radians.
+ void GetFrequencyResponse(int n_frequencies,
+ const float* frequency_hz,
+ float* mag_response,
+ float* phase_response);
+
+ double TailTime() const override;
+ double LatencyTime() const override;
+ bool RequiresTailProcessing() const final;
+
+ protected:
+ IIRFilter iir_;
+
+ private:
+ double tail_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_IIRDSP_KERNEL_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc
new file mode 100644
index 00000000000..0e1366ed07a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.cc
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h"
+
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/html/media/html_media_element.h"
+#include "third_party/blink/renderer/core/inspector/console_message.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/media_element_audio_source_options.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
+#include "third_party/blink/renderer/platform/wtf/locker.h"
+
+namespace blink {
+
+MediaElementAudioSourceHandler::MediaElementAudioSourceHandler(
+ AudioNode& node,
+ HTMLMediaElement& media_element)
+ : AudioHandler(kNodeTypeMediaElementAudioSource,
+ node,
+ node.context()->sampleRate()),
+ media_element_(media_element),
+ source_number_of_channels_(0),
+ source_sample_rate_(0),
+ passes_current_src_cors_access_check_(
+ PassesCurrentSrcCORSAccessCheck(media_element.currentSrc())),
+ maybe_print_cors_message_(!passes_current_src_cors_access_check_),
+ current_src_string_(media_element.currentSrc().GetString()) {
+ DCHECK(IsMainThread());
+ // Default to stereo. This could change depending on what the media element
+ // .src is set to.
+ AddOutput(2);
+
+ if (Context()->GetExecutionContext()) {
+ task_runner_ = Context()->GetExecutionContext()->GetTaskRunner(
+ TaskType::kMediaElementEvent);
+ }
+
+ Initialize();
+}
+
+scoped_refptr<MediaElementAudioSourceHandler>
+MediaElementAudioSourceHandler::Create(AudioNode& node,
+ HTMLMediaElement& media_element) {
+ return base::AdoptRef(
+ new MediaElementAudioSourceHandler(node, media_element));
+}
+
+MediaElementAudioSourceHandler::~MediaElementAudioSourceHandler() {
+ Uninitialize();
+}
+
+HTMLMediaElement* MediaElementAudioSourceHandler::MediaElement() const {
+ return media_element_.Get();
+}
+
+void MediaElementAudioSourceHandler::Dispose() {
+ media_element_->SetAudioSourceNode(nullptr);
+ AudioHandler::Dispose();
+}
+
+void MediaElementAudioSourceHandler::SetFormat(size_t number_of_channels,
+ float source_sample_rate) {
+ if (number_of_channels != source_number_of_channels_ ||
+ source_sample_rate != source_sample_rate_) {
+ if (!number_of_channels ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
+ !AudioUtilities::IsValidAudioBufferSampleRate(source_sample_rate)) {
+ // process() will generate silence for these uninitialized values.
+ DLOG(ERROR) << "setFormat(" << number_of_channels << ", "
+ << source_sample_rate << ") - unhandled format change";
+ // Synchronize with process().
+ Locker<MediaElementAudioSourceHandler> locker(*this);
+ source_number_of_channels_ = 0;
+ source_sample_rate_ = 0;
+ return;
+ }
+
+ // Synchronize with process() to protect m_sourceNumberOfChannels,
+ // m_sourceSampleRate, and m_multiChannelResampler.
+ Locker<MediaElementAudioSourceHandler> locker(*this);
+
+ source_number_of_channels_ = number_of_channels;
+ source_sample_rate_ = source_sample_rate;
+
+ if (source_sample_rate != Context()->sampleRate()) {
+ double scale_factor = source_sample_rate / Context()->sampleRate();
+ multi_channel_resampler_ = std::make_unique<MultiChannelResampler>(
+ scale_factor, number_of_channels);
+ } else {
+ // Bypass resampling.
+ multi_channel_resampler_.reset();
+ }
+
+ {
+ // The context must be locked when changing the number of output channels.
+ BaseAudioContext::GraphAutoLocker context_locker(Context());
+
+      // Do any necessary re-configuration to the output's number of channels.
+ Output(0).SetNumberOfChannels(number_of_channels);
+ }
+ }
+}
+
+bool MediaElementAudioSourceHandler::PassesCORSAccessCheck() {
+ DCHECK(MediaElement());
+
+ return (MediaElement()->GetWebMediaPlayer() &&
+ MediaElement()->GetWebMediaPlayer()->DidPassCORSAccessCheck()) ||
+ passes_current_src_cors_access_check_;
+}
+
+void MediaElementAudioSourceHandler::OnCurrentSrcChanged(
+ const KURL& current_src) {
+ DCHECK(IsMainThread());
+
+ // Synchronize with process().
+ Locker<MediaElementAudioSourceHandler> locker(*this);
+
+ passes_current_src_cors_access_check_ =
+ PassesCurrentSrcCORSAccessCheck(current_src);
+
+  // Make a note if we need to print a console message and save the |currentSrc|
+ // for use in the message. Need to wait until later to print the message in
+ // case HTMLMediaElement allows access.
+ maybe_print_cors_message_ = !passes_current_src_cors_access_check_;
+ current_src_string_ = current_src.GetString();
+}
+
+bool MediaElementAudioSourceHandler::PassesCurrentSrcCORSAccessCheck(
+ const KURL& current_src) {
+ DCHECK(IsMainThread());
+ return Context()->GetSecurityOrigin() &&
+ Context()->GetSecurityOrigin()->CanRequest(current_src);
+}
+
+void MediaElementAudioSourceHandler::PrintCORSMessage(const String& message) {
+ if (Context()->GetExecutionContext()) {
+ Context()->GetExecutionContext()->AddConsoleMessage(
+ ConsoleMessage::Create(kSecurityMessageSource, kInfoMessageLevel,
+ "MediaElementAudioSource outputs zeroes due to "
+ "CORS access restrictions for " +
+ message));
+ }
+}
+
+void MediaElementAudioSourceHandler::Process(size_t number_of_frames) {
+ AudioBus* output_bus = Output(0).Bus();
+
+ // Use a tryLock() to avoid contention in the real-time audio thread.
+ // If we fail to acquire the lock then the HTMLMediaElement must be in the
+ // middle of reconfiguring its playback engine, so we output silence in this
+ // case.
+ MutexTryLocker try_locker(process_lock_);
+ if (try_locker.Locked()) {
+ if (!MediaElement() || !source_sample_rate_) {
+ output_bus->Zero();
+ return;
+ }
+
+ // TODO(crbug.com/811516): Although OnSetFormat() requested the output bus
+ // channels, the actual channel count might have not been changed yet.
+ // Output silence for such case until the channel count is resolved.
+ if (source_number_of_channels_ != output_bus->NumberOfChannels()) {
+ output_bus->Zero();
+ return;
+ }
+
+ AudioSourceProvider& provider = MediaElement()->GetAudioSourceProvider();
+ // Grab data from the provider so that the element continues to make
+ // progress, even if we're going to output silence anyway.
+ if (multi_channel_resampler_.get()) {
+ DCHECK_NE(source_sample_rate_, Context()->sampleRate());
+ multi_channel_resampler_->Process(&provider, output_bus,
+ number_of_frames);
+ } else {
+ // Bypass the resampler completely if the source is at the context's
+ // sample-rate.
+ DCHECK_EQ(source_sample_rate_, Context()->sampleRate());
+ provider.ProvideInput(output_bus, number_of_frames);
+ }
+ // Output silence if we don't have access to the element.
+ if (!PassesCORSAccessCheck()) {
+ if (maybe_print_cors_message_) {
+ // Print a CORS message, but just once for each change in the current
+ // media element source, and only if we have a document to print to.
+ maybe_print_cors_message_ = false;
+ PostCrossThreadTask(
+ *task_runner_, FROM_HERE,
+ CrossThreadBind(&MediaElementAudioSourceHandler::PrintCORSMessage,
+ WrapRefCounted(this), current_src_string_));
+ }
+ output_bus->Zero();
+ }
+ } else {
+ // We failed to acquire the lock.
+ output_bus->Zero();
+ }
+}
+
+void MediaElementAudioSourceHandler::lock() {
+ process_lock_.lock();
+}
+
+void MediaElementAudioSourceHandler::unlock() {
+ process_lock_.unlock();
+}
+
+// ----------------------------------------------------------------
+
+MediaElementAudioSourceNode::MediaElementAudioSourceNode(
+ BaseAudioContext& context,
+ HTMLMediaElement& media_element)
+ : AudioNode(context) {
+ SetHandler(MediaElementAudioSourceHandler::Create(*this, media_element));
+}
+
+MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
+ BaseAudioContext& context,
+ HTMLMediaElement& media_element,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ // First check if this media element already has a source node.
+ if (media_element.AudioSourceNode()) {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "HTMLMediaElement already connected "
+ "previously to a different "
+ "MediaElementSourceNode.");
+ return nullptr;
+ }
+
+ MediaElementAudioSourceNode* node =
+ new MediaElementAudioSourceNode(context, media_element);
+
+ if (node) {
+ media_element.SetAudioSourceNode(node);
+ // context keeps reference until node is disconnected
+ context.NotifySourceNodeStartedProcessing(node);
+ }
+
+ return node;
+}
+
+MediaElementAudioSourceNode* MediaElementAudioSourceNode::Create(
+ BaseAudioContext* context,
+ const MediaElementAudioSourceOptions& options,
+ ExceptionState& exception_state) {
+ if (!options.hasMediaElement()) {
+ exception_state.ThrowDOMException(kNotFoundError,
+ "mediaElement member is required.");
+ return nullptr;
+ }
+
+ return Create(*context, *options.mediaElement(), exception_state);
+}
+
+void MediaElementAudioSourceNode::Trace(blink::Visitor* visitor) {
+ AudioSourceProviderClient::Trace(visitor);
+ AudioNode::Trace(visitor);
+}
+
+MediaElementAudioSourceHandler&
+MediaElementAudioSourceNode::GetMediaElementAudioSourceHandler() const {
+ return static_cast<MediaElementAudioSourceHandler&>(Handler());
+}
+
+HTMLMediaElement* MediaElementAudioSourceNode::mediaElement() const {
+ return GetMediaElementAudioSourceHandler().MediaElement();
+}
+
+void MediaElementAudioSourceNode::SetFormat(size_t number_of_channels,
+ float sample_rate) {
+ GetMediaElementAudioSourceHandler().SetFormat(number_of_channels,
+ sample_rate);
+}
+
+void MediaElementAudioSourceNode::OnCurrentSrcChanged(const KURL& current_src) {
+ GetMediaElementAudioSourceHandler().OnCurrentSrcChanged(current_src);
+}
+
+void MediaElementAudioSourceNode::lock() {
+ GetMediaElementAudioSourceHandler().lock();
+}
+
+void MediaElementAudioSourceNode::unlock() {
+ GetMediaElementAudioSourceHandler().unlock();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h
new file mode 100644
index 00000000000..ab634c273de
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_ELEMENT_AUDIO_SOURCE_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_ELEMENT_AUDIO_SOURCE_NODE_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_annotations.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_source_provider_client.h"
+#include "third_party/blink/renderer/platform/audio/multi_channel_resampler.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class HTMLMediaElement;
+class MediaElementAudioSourceOptions;
+
+class MediaElementAudioSourceHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<MediaElementAudioSourceHandler> Create(
+ AudioNode&,
+ HTMLMediaElement&);
+ ~MediaElementAudioSourceHandler() override;
+
+ HTMLMediaElement* MediaElement() const;
+
+ // AudioHandler
+ void Dispose() override;
+ void Process(size_t frames_to_process) override;
+
+ // AudioNode
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+
+ // Helpers for AudioSourceProviderClient implementation of
+ // MediaElementAudioSourceNode.
+ void SetFormat(size_t number_of_channels, float sample_rate);
+ void OnCurrentSrcChanged(const KURL& current_src);
+ void lock() EXCLUSIVE_LOCK_FUNCTION(GetProcessLock());
+ void unlock() UNLOCK_FUNCTION(GetProcessLock());
+
+ // For thread safety analysis only. Does not actually return mu.
+ Mutex* GetProcessLock() LOCK_RETURNED(process_lock_) {
+ NOTREACHED();
+ return nullptr;
+ }
+
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ MediaElementAudioSourceHandler(AudioNode&, HTMLMediaElement&);
+ // As an audio source, we will never propagate silence.
+ bool PropagatesSilence() const override { return false; }
+
+ // Must be called only on the audio thread.
+ bool PassesCORSAccessCheck();
+
+ // Must be called only on the main thread.
+ bool PassesCurrentSrcCORSAccessCheck(const KURL& current_src);
+
+ // Print warning if CORS restrictions cause MediaElementAudioSource to output
+ // zeroes.
+ void PrintCORSMessage(const String& message);
+
+ // This Persistent doesn't make a reference cycle. The reference from
+ // HTMLMediaElement to AudioSourceProvideClient, which
+ // MediaElementAudioSourceNode implements, is weak.
+ //
+ // It is accessed by both audio and main thread. TODO: we really should
+ // try to minimize or avoid the audio thread touching this element.
+ CrossThreadPersistent<HTMLMediaElement> media_element_;
+ Mutex process_lock_;
+
+ unsigned source_number_of_channels_;
+ double source_sample_rate_;
+
+ std::unique_ptr<MultiChannelResampler> multi_channel_resampler_;
+
+ // |m_passesCurrentSrcCORSAccessCheck| holds the value of
+ // context()->getSecurityOrigin() &&
+ // context()->getSecurityOrigin()->canRequest(mediaElement()->currentSrc()),
+ // updated in the ctor and onCurrentSrcChanged() on the main thread and used
+ // in passesCORSAccessCheck() on the audio thread, protected by
+ // |m_processLock|.
+ bool passes_current_src_cors_access_check_;
+
+ // Indicates if we need to print a CORS message if the current source has
+ // changed and we have no access to it. Must be protected by |m_processLock|.
+ bool maybe_print_cors_message_;
+
+ // The value of mediaElement()->currentSrc().string() in the ctor and
+ // onCurrentSrcChanged(). Protected by |m_processLock|.
+ String current_src_string_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+};
+
+class MediaElementAudioSourceNode final : public AudioNode,
+ public AudioSourceProviderClient {
+ DEFINE_WRAPPERTYPEINFO();
+ USING_GARBAGE_COLLECTED_MIXIN(MediaElementAudioSourceNode);
+
+ public:
+ static MediaElementAudioSourceNode* Create(BaseAudioContext&,
+ HTMLMediaElement&,
+ ExceptionState&);
+ static MediaElementAudioSourceNode* Create(
+ BaseAudioContext*,
+ const MediaElementAudioSourceOptions&,
+ ExceptionState&);
+
+ virtual void Trace(blink::Visitor*);
+ MediaElementAudioSourceHandler& GetMediaElementAudioSourceHandler() const;
+
+ HTMLMediaElement* mediaElement() const;
+
+ // AudioSourceProviderClient functions:
+ void SetFormat(size_t number_of_channels, float sample_rate) override;
+ void OnCurrentSrcChanged(const KURL& current_src) override;
+ void lock() override EXCLUSIVE_LOCK_FUNCTION(
+ GetMediaElementAudioSourceHandler().GetProcessLock());
+ void unlock() override
+ UNLOCK_FUNCTION(GetMediaElementAudioSourceHandler().GetProcessLock());
+
+ private:
+ MediaElementAudioSourceNode(BaseAudioContext&, HTMLMediaElement&);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_ELEMENT_AUDIO_SOURCE_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.idl
new file mode 100644
index 00000000000..8a1df10e151
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_node.idl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#mediaelementaudiosourcenode
+[
+ // TODO(rtoy): This should be AudioContext, not BaseAudioContext.
+ Constructor(BaseAudioContext context, MediaElementAudioSourceOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface MediaElementAudioSourceNode : AudioNode {
+ [SameObject] readonly attribute HTMLMediaElement mediaElement;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_options.idl
new file mode 100644
index 00000000000..074ed840cf4
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_element_audio_source_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-mediaelementaudiosourceoptions
+dictionary MediaElementAudioSourceOptions {
+ required HTMLMediaElement mediaElement;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc
new file mode 100644
index 00000000000..ef84d6b2a0f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/public/platform/web_rtc_peer_connection_handler.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_options.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h"
+#include "third_party/blink/renderer/platform/mediastream/media_stream_center.h"
+#include "third_party/blink/renderer/platform/uuid.h"
+#include "third_party/blink/renderer/platform/wtf/locker.h"
+
+namespace blink {
+
+// WebAudioCapturerSource ignores any channel count beyond 8, so we cap the
+// channel count here to avoid anything that could cause a crash.
+static const unsigned long kMaxChannelCount = 8;
+
+MediaStreamAudioDestinationHandler::MediaStreamAudioDestinationHandler(
+ AudioNode& node,
+ size_t number_of_channels)
+ : AudioBasicInspectorHandler(kNodeTypeMediaStreamAudioDestination,
+ node,
+ node.context()->sampleRate(),
+ number_of_channels),
+ mix_bus_(AudioBus::Create(number_of_channels,
+ AudioUtilities::kRenderQuantumFrames)) {
+ source_ = MediaStreamSource::Create("WebAudio-" + CreateCanonicalUUIDString(),
+ MediaStreamSource::kTypeAudio,
+ "MediaStreamAudioDestinationNode", false,
+ MediaStreamSource::kReadyStateLive, true);
+ MediaStreamSourceVector audio_sources;
+ audio_sources.push_back(source_.Get());
+ MediaStreamSourceVector video_sources;
+ stream_ = MediaStream::Create(
+ node.context()->GetExecutionContext(),
+ MediaStreamDescriptor::Create(audio_sources, video_sources));
+ MediaStreamCenter::Instance().DidCreateMediaStreamAndTracks(
+ stream_->Descriptor());
+
+ source_->SetAudioFormat(number_of_channels, node.context()->sampleRate());
+
+ SetInternalChannelCountMode(kExplicit);
+ Initialize();
+}
+
+scoped_refptr<MediaStreamAudioDestinationHandler>
+MediaStreamAudioDestinationHandler::Create(AudioNode& node,
+ size_t number_of_channels) {
+ return base::AdoptRef(
+ new MediaStreamAudioDestinationHandler(node, number_of_channels));
+}
+
+MediaStreamAudioDestinationHandler::~MediaStreamAudioDestinationHandler() {
+ Uninitialize();
+}
+
+void MediaStreamAudioDestinationHandler::Process(size_t number_of_frames) {
+ // Conform the input bus into the internal mix bus, which represents
+ // MediaStreamDestination's channel count.
+
+ // Synchronize with possible dynamic changes to the channel count.
+ MutexTryLocker try_locker(process_lock_);
+
+ // If we can get the lock, we can process normally by updating the
+ // mix bus to a new channel count, if needed. If not, just use the
+ // old mix bus to do the mixing; we'll update the bus next time
+ // around.
+ if (try_locker.Locked()) {
+ unsigned count = ChannelCount();
+ if (count != mix_bus_->NumberOfChannels()) {
+ mix_bus_ = AudioBus::Create(count, AudioUtilities::kRenderQuantumFrames);
+ // setAudioFormat has an internal lock. This can cause audio to
+ // glitch. This is outside of our control.
+ source_->SetAudioFormat(count, Context()->sampleRate());
+ }
+ }
+
+ mix_bus_->CopyFrom(*Input(0).Bus());
+
+ // consumeAudio has an internal lock (also used by setAudioFormat).
+ // This can cause audio to glitch. This is outside of our control.
+ source_->ConsumeAudio(mix_bus_.get(), number_of_frames);
+}
+
+void MediaStreamAudioDestinationHandler::SetChannelCount(
+ unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ // Currently the maximum channel count supported for this node is 8,
+  // which is constrained by m_source (WebAudioCapturerSource). Although
+ // it has its own safety check for the excessive channels, throwing an
+ // exception here is useful to developers.
+ if (channel_count < 1 || channel_count > MaxChannelCount()) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexOutsideRange<unsigned>(
+ "channel count", channel_count, 1,
+ ExceptionMessages::kInclusiveBound, MaxChannelCount(),
+ ExceptionMessages::kInclusiveBound));
+ return;
+ }
+
+ // Synchronize changes in the channel count with process() which
+ // needs to update m_mixBus.
+ MutexLocker locker(process_lock_);
+
+ AudioHandler::SetChannelCount(channel_count, exception_state);
+}
+
+unsigned long MediaStreamAudioDestinationHandler::MaxChannelCount() const {
+ return kMaxChannelCount;
+}
+
+// ----------------------------------------------------------------
+
+MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(
+ BaseAudioContext& context,
+ size_t number_of_channels)
+ : AudioBasicInspectorNode(context) {
+ SetHandler(
+ MediaStreamAudioDestinationHandler::Create(*this, number_of_channels));
+}
+
+MediaStreamAudioDestinationNode* MediaStreamAudioDestinationNode::Create(
+ BaseAudioContext& context,
+ size_t number_of_channels,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new MediaStreamAudioDestinationNode(context, number_of_channels);
+}
+
+MediaStreamAudioDestinationNode* MediaStreamAudioDestinationNode::Create(
+ BaseAudioContext* context,
+ const AudioNodeOptions& options,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+  // Default to stereo; |options| will update it appropriately if needed.
+ MediaStreamAudioDestinationNode* node =
+ new MediaStreamAudioDestinationNode(*context, 2);
+
+ // Need to handle channelCount here ourselves because the upper
+ // limit is different from the normal AudioNode::setChannelCount
+ // limit of 32. Error messages will sometimes show the wrong
+ // limits.
+ if (options.hasChannelCount())
+ node->setChannelCount(options.channelCount(), exception_state);
+
+ node->HandleChannelOptions(options, exception_state);
+
+ return node;
+}
+
+MediaStream* MediaStreamAudioDestinationNode::stream() const {
+ return static_cast<MediaStreamAudioDestinationHandler&>(Handler()).Stream();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h
new file mode 100644
index 00000000000..b4a5710cbc5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_DESTINATION_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_DESTINATION_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/mediastream/media_stream.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_inspector_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+
+namespace blink {
+
+class BaseAudioContext;
+
+class MediaStreamAudioDestinationHandler final
+ : public AudioBasicInspectorHandler {
+ public:
+ static scoped_refptr<MediaStreamAudioDestinationHandler> Create(
+ AudioNode&,
+ size_t number_of_channels);
+ ~MediaStreamAudioDestinationHandler() override;
+
+ MediaStream* Stream() { return stream_.Get(); }
+
+ // AudioHandler.
+ void Process(size_t frames_to_process) override;
+ void SetChannelCount(unsigned long, ExceptionState&) override;
+
+ unsigned long MaxChannelCount() const;
+
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ MediaStreamAudioDestinationHandler(AudioNode&, size_t number_of_channels);
+ // As an audio source, we will never propagate silence.
+ bool PropagatesSilence() const override { return false; }
+
+ // This Persistent doesn't make a reference cycle.
+ Persistent<MediaStream> stream_;
+ // Accessed by main thread and during audio thread processing.
+ //
+ // TODO: try to avoid such access during audio thread processing.
+ CrossThreadPersistent<MediaStreamSource> source_;
+
+ // This synchronizes dynamic changes to the channel count with
+ // process() to manage the mix bus.
+ mutable Mutex process_lock_;
+
+ // This internal mix bus is for up/down mixing the input to the actual
+ // number of channels in the destination.
+ scoped_refptr<AudioBus> mix_bus_;
+};
+
+class MediaStreamAudioDestinationNode final : public AudioBasicInspectorNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static MediaStreamAudioDestinationNode* Create(BaseAudioContext&,
+ size_t number_of_channels,
+ ExceptionState&);
+ static MediaStreamAudioDestinationNode* Create(BaseAudioContext*,
+ const AudioNodeOptions&,
+ ExceptionState&);
+
+ MediaStream* stream() const;
+
+ private:
+ MediaStreamAudioDestinationNode(BaseAudioContext&, size_t number_of_channels);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_DESTINATION_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.idl
new file mode 100644
index 00000000000..7cc91d87cc3
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_destination_node.idl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#mediastreamaudiodestinationnode
+[
+ Constructor(BaseAudioContext context, optional AudioNodeOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface MediaStreamAudioDestinationNode : AudioNode {
+ readonly attribute MediaStream stream;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc
new file mode 100644
index 00000000000..52aaf0bb0f8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h"
+
+#include <memory>
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/media_stream_audio_source_options.h"
+#include "third_party/blink/renderer/platform/wtf/locker.h"
+
+namespace blink {
+
+MediaStreamAudioSourceHandler::MediaStreamAudioSourceHandler(
+ AudioNode& node,
+ std::unique_ptr<AudioSourceProvider> audio_source_provider)
+ : AudioHandler(kNodeTypeMediaStreamAudioSource,
+ node,
+ node.context()->sampleRate()),
+ audio_source_provider_(std::move(audio_source_provider)),
+ source_number_of_channels_(0) {
+ // Default to stereo. This could change depending on the format of the
+ // MediaStream's audio track.
+ AddOutput(2);
+
+ Initialize();
+}
+
+scoped_refptr<MediaStreamAudioSourceHandler>
+MediaStreamAudioSourceHandler::Create(
+ AudioNode& node,
+ std::unique_ptr<AudioSourceProvider> audio_source_provider) {
+ return base::AdoptRef(new MediaStreamAudioSourceHandler(
+ node, std::move(audio_source_provider)));
+}
+
+MediaStreamAudioSourceHandler::~MediaStreamAudioSourceHandler() {
+ Uninitialize();
+}
+
+void MediaStreamAudioSourceHandler::SetFormat(size_t number_of_channels,
+ float source_sample_rate) {
+ if (number_of_channels != source_number_of_channels_ ||
+ source_sample_rate != Context()->sampleRate()) {
+ // The sample-rate must be equal to the context's sample-rate.
+ if (!number_of_channels ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
+ source_sample_rate != Context()->sampleRate()) {
+ // process() will generate silence for these uninitialized values.
+ DLOG(ERROR) << "setFormat(" << number_of_channels << ", "
+ << source_sample_rate << ") - unhandled format change";
+ source_number_of_channels_ = 0;
+ return;
+ }
+
+ // Synchronize with process().
+ MutexLocker locker(process_lock_);
+
+ source_number_of_channels_ = number_of_channels;
+
+ {
+ // The context must be locked when changing the number of output channels.
+ BaseAudioContext::GraphAutoLocker context_locker(Context());
+
+      // Do any necessary re-configuration to the output's number of channels.
+ Output(0).SetNumberOfChannels(number_of_channels);
+ }
+ }
+}
+
+void MediaStreamAudioSourceHandler::Process(size_t number_of_frames) {
+ AudioBus* output_bus = Output(0).Bus();
+
+ if (!GetAudioSourceProvider()) {
+ output_bus->Zero();
+ return;
+ }
+
+ if (source_number_of_channels_ != output_bus->NumberOfChannels()) {
+ output_bus->Zero();
+ return;
+ }
+
+ // Use a tryLock() to avoid contention in the real-time audio thread.
+ // If we fail to acquire the lock then the MediaStream must be in the middle
+ // of a format change, so we output silence in this case.
+ MutexTryLocker try_locker(process_lock_);
+ if (try_locker.Locked()) {
+ GetAudioSourceProvider()->ProvideInput(output_bus, number_of_frames);
+ } else {
+ // We failed to acquire the lock.
+ output_bus->Zero();
+ }
+}
+
+// ----------------------------------------------------------------
+
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(
+ BaseAudioContext& context,
+ MediaStream& media_stream,
+ MediaStreamTrack* audio_track,
+ std::unique_ptr<AudioSourceProvider> audio_source_provider)
+ : AudioNode(context),
+ audio_track_(audio_track),
+ media_stream_(media_stream) {
+ SetHandler(MediaStreamAudioSourceHandler::Create(
+ *this, std::move(audio_source_provider)));
+}
+
+MediaStreamAudioSourceNode* MediaStreamAudioSourceNode::Create(
+ BaseAudioContext& context,
+ MediaStream& media_stream,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ MediaStreamTrackVector audio_tracks = media_stream.getAudioTracks();
+ if (audio_tracks.IsEmpty()) {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "MediaStream has no audio track");
+ return nullptr;
+ }
+
+ // Use the first audio track in the media stream.
+ MediaStreamTrack* audio_track = audio_tracks[0];
+ std::unique_ptr<AudioSourceProvider> provider =
+ audio_track->CreateWebAudioSource();
+
+ MediaStreamAudioSourceNode* node = new MediaStreamAudioSourceNode(
+ context, media_stream, audio_track, std::move(provider));
+
+ if (!node)
+ return nullptr;
+
+ // TODO(hongchan): Only stereo streams are supported right now. We should be
+ // able to accept multi-channel streams.
+ node->SetFormat(2, context.sampleRate());
+  // The context keeps a reference to the node until it is disconnected.
+ context.NotifySourceNodeStartedProcessing(node);
+
+ return node;
+}
+
+MediaStreamAudioSourceNode* MediaStreamAudioSourceNode::Create(
+ BaseAudioContext* context,
+ const MediaStreamAudioSourceOptions& options,
+ ExceptionState& exception_state) {
+ return Create(*context, *options.mediaStream(), exception_state);
+}
+
+void MediaStreamAudioSourceNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(audio_track_);
+ visitor->Trace(media_stream_);
+ AudioSourceProviderClient::Trace(visitor);
+ AudioNode::Trace(visitor);
+}
+
+MediaStreamAudioSourceHandler&
+MediaStreamAudioSourceNode::GetMediaStreamAudioSourceHandler() const {
+ return static_cast<MediaStreamAudioSourceHandler&>(Handler());
+}
+
+MediaStream* MediaStreamAudioSourceNode::getMediaStream() const {
+ return media_stream_;
+}
+
+void MediaStreamAudioSourceNode::SetFormat(size_t number_of_channels,
+ float source_sample_rate) {
+ GetMediaStreamAudioSourceHandler().SetFormat(number_of_channels,
+ source_sample_rate);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h
new file mode 100644
index 00000000000..b9f4fc99998
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_SOURCE_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_SOURCE_NODE_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/mediastream/media_stream.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_source_provider.h"
+#include "third_party/blink/renderer/platform/audio/audio_source_provider_client.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class MediaStreamAudioSourceOptions;
+
+class MediaStreamAudioSourceHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<MediaStreamAudioSourceHandler> Create(
+ AudioNode&,
+ std::unique_ptr<AudioSourceProvider>);
+ ~MediaStreamAudioSourceHandler() override;
+
+ // AudioHandler
+ void Process(size_t frames_to_process) override;
+
+ // AudioNode
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+
+ // A helper for AudioSourceProviderClient implementation of
+ // MediaStreamAudioSourceNode.
+ void SetFormat(size_t number_of_channels, float sample_rate);
+
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ MediaStreamAudioSourceHandler(AudioNode&,
+ std::unique_ptr<AudioSourceProvider>);
+
+ // As an audio source, we will never propagate silence.
+ bool PropagatesSilence() const override { return false; }
+
+ AudioSourceProvider* GetAudioSourceProvider() const {
+ return audio_source_provider_.get();
+ }
+
+ std::unique_ptr<AudioSourceProvider> audio_source_provider_;
+
+ Mutex process_lock_;
+
+ unsigned source_number_of_channels_;
+};
+
+class MediaStreamAudioSourceNode final : public AudioNode,
+ public AudioSourceProviderClient {
+ DEFINE_WRAPPERTYPEINFO();
+ USING_GARBAGE_COLLECTED_MIXIN(MediaStreamAudioSourceNode);
+
+ public:
+ static MediaStreamAudioSourceNode* Create(BaseAudioContext&,
+ MediaStream&,
+ ExceptionState&);
+ static MediaStreamAudioSourceNode* Create(
+ BaseAudioContext*,
+ const MediaStreamAudioSourceOptions&,
+ ExceptionState&);
+
+ virtual void Trace(blink::Visitor*);
+
+ MediaStream* getMediaStream() const;
+
+ // AudioSourceProviderClient functions:
+ void SetFormat(size_t number_of_channels, float sample_rate) override;
+
+ private:
+ MediaStreamAudioSourceNode(BaseAudioContext&,
+ MediaStream&,
+ MediaStreamTrack*,
+ std::unique_ptr<AudioSourceProvider>);
+
+ MediaStreamAudioSourceHandler& GetMediaStreamAudioSourceHandler() const;
+
+ Member<MediaStreamTrack> audio_track_;
+ Member<MediaStream> media_stream_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_MEDIA_STREAM_AUDIO_SOURCE_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.idl
new file mode 100644
index 00000000000..1e8713dd726
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_node.idl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#mediastreamaudiosourcenode
+[
+ Constructor(BaseAudioContext context, MediaStreamAudioSourceOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface MediaStreamAudioSourceNode : AudioNode {
+ [SameObject, ImplementedAs=getMediaStream] readonly attribute MediaStream mediaStream;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_options.idl
new file mode 100644
index 00000000000..f6762af8ad7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/media_stream_audio_source_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-mediastreamaudiosourceoptions
+dictionary MediaStreamAudioSourceOptions {
+ required MediaStream mediaStream;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc
new file mode 100644
index 00000000000..8156266c23e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h"
+
+namespace blink {
+
+OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create() {
+ return new OfflineAudioCompletionEvent;
+}
+
+OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create(
+ AudioBuffer* rendered_buffer) {
+ return new OfflineAudioCompletionEvent(rendered_buffer);
+}
+
+OfflineAudioCompletionEvent* OfflineAudioCompletionEvent::Create(
+ const AtomicString& event_type,
+ const OfflineAudioCompletionEventInit& event_init) {
+ return new OfflineAudioCompletionEvent(event_type, event_init);
+}
+
+OfflineAudioCompletionEvent::OfflineAudioCompletionEvent() = default;
+
+OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(
+ AudioBuffer* rendered_buffer)
+ : Event(EventTypeNames::complete, Bubbles::kYes, Cancelable::kNo),
+ rendered_buffer_(rendered_buffer) {}
+
+OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(
+ const AtomicString& event_type,
+ const OfflineAudioCompletionEventInit& event_init)
+ : Event(event_type, event_init) {
+ rendered_buffer_ = event_init.renderedBuffer();
+}
+
+OfflineAudioCompletionEvent::~OfflineAudioCompletionEvent() = default;
+
+const AtomicString& OfflineAudioCompletionEvent::InterfaceName() const {
+ return EventNames::OfflineAudioCompletionEvent;
+}
+
+void OfflineAudioCompletionEvent::Trace(blink::Visitor* visitor) {
+ visitor->Trace(rendered_buffer_);
+ Event::Trace(visitor);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h
new file mode 100644
index 00000000000..f3c892dadff
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_COMPLETION_EVENT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_COMPLETION_EVENT_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/event_modules.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_completion_event_init.h"
+
+namespace blink {
+
+class AudioBuffer;
+class OfflineAudioCompletionEventInit;
+
+class OfflineAudioCompletionEvent final : public Event {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static OfflineAudioCompletionEvent* Create();
+ static OfflineAudioCompletionEvent* Create(AudioBuffer* rendered_buffer);
+ static OfflineAudioCompletionEvent* Create(
+ const AtomicString& type,
+ const OfflineAudioCompletionEventInit&);
+
+ ~OfflineAudioCompletionEvent() override;
+
+ AudioBuffer* renderedBuffer() { return rendered_buffer_.Get(); }
+
+ const AtomicString& InterfaceName() const override;
+
+ virtual void Trace(blink::Visitor*);
+
+ private:
+ OfflineAudioCompletionEvent();
+ explicit OfflineAudioCompletionEvent(AudioBuffer* rendered_buffer);
+ explicit OfflineAudioCompletionEvent(const AtomicString& type,
+ const OfflineAudioCompletionEventInit&);
+
+ Member<AudioBuffer> rendered_buffer_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_COMPLETION_EVENT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.idl b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.idl
new file mode 100644
index 00000000000..368ff9df458
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.idl
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#offlineaudiocompletionevent
+[
+ Constructor(DOMString type, OfflineAudioCompletionEventInit eventInitDict)
+]
+interface OfflineAudioCompletionEvent : Event {
+ readonly attribute AudioBuffer renderedBuffer;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event_init.idl b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event_init.idl
new file mode 100644
index 00000000000..fe37190239a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_completion_event_init.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-offlineaudiocompletioneventinit
+dictionary OfflineAudioCompletionEventInit : EventInit {
+ required AudioBuffer renderedBuffer;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc
new file mode 100644
index 00000000000..4dfc8e3f7bc
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.cc
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/document.h"
+#include "third_party/blink/renderer/core/dom/dom_exception.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_listener.h"
+#include "third_party/blink/renderer/modules/webaudio/deferred_task_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context_options.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h"
+#include "third_party/blink/renderer/platform/bindings/script_state.h"
+
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+
+namespace blink {
+
+OfflineAudioContext* OfflineAudioContext::Create(
+ ExecutionContext* context,
+ unsigned number_of_channels,
+ unsigned number_of_frames,
+ float sample_rate,
+ ExceptionState& exception_state) {
+ // FIXME: add support for workers.
+ if (!context || !context->IsDocument()) {
+ exception_state.ThrowDOMException(kNotSupportedError,
+ "Workers are not supported.");
+ return nullptr;
+ }
+
+ Document* document = ToDocument(context);
+
+ if (!number_of_frames) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError,
+ ExceptionMessages::IndexExceedsMinimumBound<unsigned>(
+ "number of frames", number_of_frames, 1));
+ return nullptr;
+ }
+
+ if (number_of_channels == 0 ||
+ number_of_channels > BaseAudioContext::MaxNumberOfChannels()) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned>(
+ "number of channels", number_of_channels, 1,
+ ExceptionMessages::kInclusiveBound,
+ BaseAudioContext::MaxNumberOfChannels(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange(
+ "sampleRate", sample_rate,
+ AudioUtilities::MinAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound,
+ AudioUtilities::MaxAudioBufferSampleRate(),
+ ExceptionMessages::kInclusiveBound));
+ return nullptr;
+ }
+
+ OfflineAudioContext* audio_context =
+ new OfflineAudioContext(document, number_of_channels, number_of_frames,
+ sample_rate, exception_state);
+ audio_context->PauseIfNeeded();
+
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: OfflineAudioContext::OfflineAudioContext()\n",
+ audio_context);
+#endif
+ DEFINE_STATIC_LOCAL(SparseHistogram, offline_context_channel_count_histogram,
+ ("WebAudio.OfflineAudioContext.ChannelCount"));
+  // Arbitrarily limit the maximum length to 1 million frames (about 20 sec
+ // at 48kHz). The number of buckets is fairly arbitrary.
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, offline_context_length_histogram,
+ ("WebAudio.OfflineAudioContext.Length", 1, 1000000, 50));
+ // The limits are the min and max AudioBuffer sample rates currently
+ // supported. We use explicit values here instead of
+ // AudioUtilities::minAudioBufferSampleRate() and
+ // AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
+ // fairly arbitrary.
+ DEFINE_STATIC_LOCAL(
+ CustomCountHistogram, offline_context_sample_rate_histogram,
+ ("WebAudio.OfflineAudioContext.SampleRate384kHz", 3000, 384000, 50));
+
+ offline_context_channel_count_histogram.Sample(number_of_channels);
+ offline_context_length_histogram.Count(number_of_frames);
+ offline_context_sample_rate_histogram.Count(sample_rate);
+
+ return audio_context;
+}
+
+OfflineAudioContext* OfflineAudioContext::Create(
+ ExecutionContext* context,
+ const OfflineAudioContextOptions& options,
+ ExceptionState& exception_state) {
+ OfflineAudioContext* offline_context =
+ Create(context, options.numberOfChannels(), options.length(),
+ options.sampleRate(), exception_state);
+
+ return offline_context;
+}
+
+OfflineAudioContext::OfflineAudioContext(Document* document,
+ unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ ExceptionState& exception_state)
+ : BaseAudioContext(document, kOfflineContext),
+ is_rendering_started_(false),
+ total_render_frames_(number_of_frames) {
+ destination_node_ = OfflineAudioDestinationNode::Create(
+ this, number_of_channels, number_of_frames, sample_rate);
+ Initialize();
+}
+
+OfflineAudioContext::~OfflineAudioContext() {
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "[%16p]: OfflineAudioContext::~OfflineAudioContext()\n",
+ this);
+#endif
+}
+
+void OfflineAudioContext::Trace(blink::Visitor* visitor) {
+ visitor->Trace(complete_resolver_);
+ visitor->Trace(scheduled_suspends_);
+ BaseAudioContext::Trace(visitor);
+}
+
+ScriptPromise OfflineAudioContext::startOfflineRendering(
+ ScriptState* script_state) {
+ DCHECK(IsMainThread());
+
+ // Calling close() on an OfflineAudioContext is not supported/allowed,
+ // but it might well have been stopped by its execution context.
+ //
+ // See: crbug.com/435867
+ if (IsContextClosed()) {
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(kInvalidStateError,
+ "cannot call startRendering on an "
+ "OfflineAudioContext in a stopped state."));
+ }
+
+ // If the context is not in the suspended state (i.e. running), reject the
+ // promise.
+ if (ContextState() != AudioContextState::kSuspended) {
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(
+ kInvalidStateError,
+ "cannot startRendering when an OfflineAudioContext is " + state()));
+ }
+
+ // Can't call startRendering more than once. Return a rejected promise now.
+ if (is_rendering_started_) {
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(kInvalidStateError,
+ "cannot call startRendering more than once"));
+ }
+
+ DCHECK(!is_rendering_started_);
+
+ complete_resolver_ = ScriptPromiseResolver::Create(script_state);
+
+ // Allocate the AudioBuffer to hold the rendered result.
+ float sample_rate = DestinationHandler().SampleRate();
+ unsigned number_of_channels = DestinationHandler().NumberOfChannels();
+
+ AudioBuffer* render_target = AudioBuffer::CreateUninitialized(
+ number_of_channels, total_render_frames_, sample_rate);
+
+ if (!render_target) {
+ return ScriptPromise::RejectWithDOMException(
+ script_state,
+ DOMException::Create(kNotSupportedError,
+ "startRendering failed to create AudioBuffer(" +
+ String::Number(number_of_channels) + ", " +
+ String::Number(total_render_frames_) + ", " +
+ String::Number(sample_rate) + ")"));
+ }
+
+ // Start rendering and return the promise.
+ is_rendering_started_ = true;
+ SetContextState(kRunning);
+ DestinationHandler().InitializeOfflineRenderThread(render_target);
+ DestinationHandler().StartRendering();
+
+ return complete_resolver_->Promise();
+}
+
+ScriptPromise OfflineAudioContext::suspendContext(ScriptState* script_state) {
+ LOG(FATAL) << "This CANNOT be called on OfflineAudioContext; this is only to "
+ "implement the pure virtual interface from BaseAudioContext.";
+ return ScriptPromise();
+}
+
+ScriptPromise OfflineAudioContext::suspendContext(ScriptState* script_state,
+ double when) {
+ DCHECK(IsMainThread());
+
+ ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = resolver->Promise();
+
+ // If the rendering is finished, reject the promise.
+ if (ContextState() == AudioContextState::kClosed) {
+ resolver->Reject(DOMException::Create(kInvalidStateError,
+ "the rendering is already finished"));
+ return promise;
+ }
+
+ // The specified suspend time is negative; reject the promise.
+ if (when < 0) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError,
+ "negative suspend time (" + String::Number(when) + ") is not allowed"));
+ return promise;
+ }
+
+ // Quantize (to the lower boundary) the suspend time by the render quantum.
+ size_t frame = when * sampleRate();
+ frame -= frame % DestinationHandler().RenderQuantumFrames();
+
+ // The suspend time should be earlier than the total render frame. If the
+ // requested suspension time is equal to the total render frame, the promise
+ // will be rejected.
+ if (total_render_frames_ <= frame) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError,
+ "cannot schedule a suspend at frame " + String::Number(frame) + " (" +
+ String::Number(when) + " seconds) " +
+ "because it is greater than "
+ "or equal to the total "
+ "render duration of " +
+ String::Number(total_render_frames_) + " frames"));
+ return promise;
+ }
+
+ // The specified suspend time is in the past; reject the promise.
+ if (frame < CurrentSampleFrame()) {
+ size_t current_frame_clamped = std::min(CurrentSampleFrame(), length());
+ double current_time_clamped =
+ std::min(currentTime(), length() / static_cast<double>(sampleRate()));
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError,
+ "suspend(" + String::Number(when) + ") failed to suspend at frame " +
+ String::Number(frame) + " because it is earlier than the current " +
+ "frame of " + String::Number(current_frame_clamped) + " (" +
+ String::Number(current_time_clamped) + " seconds)"));
+ return promise;
+ }
+
+ // Wait until the suspend map is available for the insertion. Here we should
+ // use GraphAutoLocker because it locks the graph from the main thread.
+ GraphAutoLocker locker(this);
+
+ // If there is a duplicate suspension at the same quantized frame,
+ // reject the promise.
+ if (scheduled_suspends_.Contains(frame)) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError, "cannot schedule more than one suspend at frame " +
+ String::Number(frame) + " (" +
+ String::Number(when) + " seconds)"));
+ return promise;
+ }
+
+ scheduled_suspends_.insert(frame, resolver);
+
+ return promise;
+}
+
+ScriptPromise OfflineAudioContext::resumeContext(ScriptState* script_state) {
+ DCHECK(IsMainThread());
+
+ ScriptPromiseResolver* resolver = ScriptPromiseResolver::Create(script_state);
+ ScriptPromise promise = resolver->Promise();
+
+ // If the rendering has not started, reject the promise.
+ if (!is_rendering_started_) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError,
+ "cannot resume an offline context that has not started"));
+ return promise;
+ }
+
+ // If the context is in a closed state, reject the promise.
+ if (ContextState() == AudioContextState::kClosed) {
+ resolver->Reject(DOMException::Create(
+ kInvalidStateError, "cannot resume a closed offline context"));
+ return promise;
+ }
+
+ // If the context is already running, resolve the promise without altering
+ // the current state or starting the rendering loop.
+ if (ContextState() == AudioContextState::kRunning) {
+ resolver->Resolve();
+ return promise;
+ }
+
+ DCHECK_EQ(ContextState(), AudioContextState::kSuspended);
+
+ // If the context is suspended, resume rendering by setting the state to
+  // "Running" and calling startRendering(). Note that resuming is possible
+ // only after the rendering started.
+ SetContextState(kRunning);
+ DestinationHandler().StartRendering();
+
+ // Resolve the promise immediately.
+ resolver->Resolve();
+
+ return promise;
+}
+
+void OfflineAudioContext::FireCompletionEvent() {
+ DCHECK(IsMainThread());
+
+ // Context is finished, so remove any tail processing nodes; there's nowhere
+ // for the output to go.
+ GetDeferredTaskHandler().FinishTailProcessing();
+
+ // We set the state to closed here so that the oncomplete event handler sees
+ // that the context has been closed.
+ SetContextState(kClosed);
+
+ // Avoid firing the event if the document has already gone away.
+ if (GetExecutionContext()) {
+ AudioBuffer* rendered_buffer = DestinationHandler().RenderTarget();
+
+ DCHECK(rendered_buffer);
+ if (!rendered_buffer)
+ return;
+
+ // Call the offline rendering completion event listener and resolve the
+ // promise too.
+ DispatchEvent(OfflineAudioCompletionEvent::Create(rendered_buffer));
+ complete_resolver_->Resolve(rendered_buffer);
+ } else {
+ // The resolver should be rejected when the execution context is gone.
+ complete_resolver_->Reject(DOMException::Create(
+ kInvalidStateError, "the execution context does not exist"));
+ }
+
+ is_rendering_started_ = false;
+}
+
+bool OfflineAudioContext::HandlePreOfflineRenderTasks() {
+ DCHECK(IsAudioThread());
+
+ // OfflineGraphAutoLocker here locks the audio graph for this scope. Note
+ // that this locker does not use tryLock() inside because the timing of
+ // suspension MUST NOT be delayed.
+ OfflineGraphAutoLocker locker(this);
+
+ // Update the dirty state of the listener.
+ listener()->UpdateState();
+
+ GetDeferredTaskHandler().HandleDeferredTasks();
+ HandleStoppableSourceNodes();
+
+ return ShouldSuspend();
+}
+
+void OfflineAudioContext::HandlePostOfflineRenderTasks() {
+ DCHECK(IsAudioThread());
+
+ // OfflineGraphAutoLocker here locks the audio graph for the same reason
+ // above in |handlePreOfflineRenderTasks|.
+ {
+ OfflineGraphAutoLocker locker(this);
+
+ GetDeferredTaskHandler().BreakConnections();
+ GetDeferredTaskHandler().HandleDeferredTasks();
+ GetDeferredTaskHandler().RequestToDeleteHandlersOnMainThread();
+ }
+}
+
+OfflineAudioDestinationHandler& OfflineAudioContext::DestinationHandler() {
+ return static_cast<OfflineAudioDestinationHandler&>(
+ destination()->GetAudioDestinationHandler());
+}
+
+void OfflineAudioContext::ResolveSuspendOnMainThread(size_t frame) {
+ DCHECK(IsMainThread());
+
+ // Suspend the context first. This will fire onstatechange event.
+ SetContextState(kSuspended);
+
+ // Wait until the suspend map is available for the removal.
+ GraphAutoLocker locker(this);
+
+ // If the context is going away, m_scheduledSuspends could have had all its
+ // entries removed. Check for that here.
+ if (scheduled_suspends_.size()) {
+ // |frame| must exist in the map.
+ DCHECK(scheduled_suspends_.Contains(frame));
+
+ SuspendMap::iterator it = scheduled_suspends_.find(frame);
+ it->value->Resolve();
+
+ scheduled_suspends_.erase(it);
+ }
+}
+
+void OfflineAudioContext::RejectPendingResolvers() {
+ DCHECK(IsMainThread());
+
+ // Wait until the suspend map is available for removal.
+ GraphAutoLocker locker(this);
+
+ // Offline context is going away so reject any promises that are still
+ // pending.
+
+ for (auto& pending_suspend_resolver : scheduled_suspends_) {
+ pending_suspend_resolver.value->Reject(DOMException::Create(
+ kInvalidStateError, "Audio context is going away"));
+ }
+
+ scheduled_suspends_.clear();
+ DCHECK_EQ(resume_resolvers_.size(), 0u);
+
+ RejectPendingDecodeAudioDataResolvers();
+}
+
+bool OfflineAudioContext::ShouldSuspend() {
+ DCHECK(IsAudioThread());
+
+ // Note that the GraphLock is required before this check. Since this needs
+ // to run on the audio thread, OfflineGraphAutoLocker must be used.
+ if (scheduled_suspends_.Contains(CurrentSampleFrame()))
+ return true;
+
+ return false;
+}
+
+bool OfflineAudioContext::HasPendingActivity() const {
+ return is_rendering_started_;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h
new file mode 100644
index 00000000000..657ffc32ac7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_CONTEXT_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_CONTEXT_H_
+
+#include "third_party/blink/renderer/modules/modules_export.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+
+namespace blink {
+
+class ExceptionState;
+class OfflineAudioContextOptions;
+class OfflineAudioDestinationHandler;
+
+class MODULES_EXPORT OfflineAudioContext final : public BaseAudioContext {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static OfflineAudioContext* Create(ExecutionContext*,
+ unsigned number_of_channels,
+ unsigned number_of_frames,
+ float sample_rate,
+ ExceptionState&);
+
+ static OfflineAudioContext* Create(ExecutionContext*,
+ const OfflineAudioContextOptions&,
+ ExceptionState&);
+
+ ~OfflineAudioContext() override;
+
+ virtual void Trace(blink::Visitor*);
+
+ size_t length() const { return total_render_frames_; }
+
+ ScriptPromise startOfflineRendering(ScriptState*);
+
+ ScriptPromise suspendContext(ScriptState*, double);
+ ScriptPromise resumeContext(ScriptState*) final;
+
+ // This is to implement the pure virtual method from BaseAudioContext.
+ // CANNOT be called from an OfflineAudioContext.
+ ScriptPromise suspendContext(ScriptState*) final;
+
+ void RejectPendingResolvers() override;
+
+ bool HasRealtimeConstraint() final { return false; }
+
+ DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
+
+ // Fire completion event when the rendering is finished.
+ void FireCompletionEvent();
+
+ // This is same with the online version in BaseAudioContext class except
+ // for returning a boolean value after checking the scheduled suspends.
+ bool HandlePreOfflineRenderTasks();
+
+ void HandlePostOfflineRenderTasks();
+
+ // Resolve a suspend scheduled at the specified frame. With this specified
+ // frame as a unique key, the associated promise resolver can be retrieved
+ // from the map (m_scheduledSuspends) and resolved.
+ void ResolveSuspendOnMainThread(size_t);
+
+ // The HashMap with 'zero' key is needed because |currentSampleFrame| can be
+ // zero.
+ using SuspendMap = HeapHashMap<size_t,
+ Member<ScriptPromiseResolver>,
+ DefaultHash<size_t>::Hash,
+ WTF::UnsignedWithZeroKeyHashTraits<size_t>>;
+
+ using OfflineGraphAutoLocker = DeferredTaskHandler::OfflineGraphAutoLocker;
+
+ // Document notification
+ bool HasPendingActivity() const final;
+
+ private:
+ OfflineAudioContext(Document*,
+ unsigned number_of_channels,
+ size_t number_of_frames,
+ float sample_rate,
+ ExceptionState&);
+
+ // Fetch directly the destination handler.
+ OfflineAudioDestinationHandler& DestinationHandler();
+
+ // Check if the rendering needs to be suspended.
+ bool ShouldSuspend();
+
+ // This map is to store the timing of scheduled suspends (frame) and the
+ // associated promise resolver. This storage can only be modified by the
+ // main thread and accessed by the audio thread with the graph lock.
+ //
+ // The map consists of key-value pairs of:
+ // { size_t quantizedFrame: ScriptPromiseResolver resolver }
+ //
+ // Note that |quantizedFrame| is a unique key, since you can have only one
+ // suspend scheduled for a certain frame. Accessing to this must be
+ // protected by the offline context lock.
+ SuspendMap scheduled_suspends_;
+
+ Member<ScriptPromiseResolver> complete_resolver_;
+
+ // This flag is necessary to indicate the rendering has actually started or
+ // running. Note that initial state of context is 'Suspended', which is the
+ // same state when the context is suspended, so we cannot utilize it for this
+ // purpose.
+ bool is_rendering_started_;
+
+ // Total render sample length.
+ size_t total_render_frames_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_CONTEXT_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.idl b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.idl
new file mode 100644
index 00000000000..92d713de756
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context.idl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#OfflineAudioContext
+[
+ Constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate),
+ Constructor(OfflineAudioContextOptions options),
+ ConstructorCallWith=ExecutionContext,
+ RaisesException=Constructor,
+ Measure
+] interface OfflineAudioContext : BaseAudioContext {
+ // Offline rendering
+ attribute EventHandler oncomplete;
+ readonly attribute unsigned long length;
+ [CallWith=ScriptState, ImplementedAs=startOfflineRendering, MeasureAs=OfflineAudioContextStartRendering] Promise<AudioBuffer> startRendering();
+ [CallWith=ScriptState, ImplementedAs=suspendContext, MeasureAs=OfflineAudioContextSuspend] Promise<void> suspend(double suspendTime);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context_options.idl
new file mode 100644
index 00000000000..d3adc6b8a27
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_context_options.idl
@@ -0,0 +1,10 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-offlineaudiocontextoptions
+dictionary OfflineAudioContextOptions {
+ unsigned long numberOfChannels = 1;
+ required unsigned long length;
+ required float sampleRate;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc
new file mode 100644
index 00000000000..dc9595eb751
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.cc
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h"
+
+#include <algorithm>
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_worklet_messaging_proxy.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/denormal_disabler.h"
+#include "third_party/blink/renderer/platform/audio/hrtf_database_loader.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+
+namespace blink {
+
+OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(
+ AudioNode& node,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate)
+ : AudioDestinationHandler(node),
+ render_target_(nullptr),
+ frames_processed_(0),
+ frames_to_process_(frames_to_process),
+ is_rendering_started_(false),
+ number_of_channels_(number_of_channels),
+ sample_rate_(sample_rate) {
+ channel_count_ = number_of_channels;
+
+ SetInternalChannelCountMode(kExplicit);
+ SetInternalChannelInterpretation(AudioBus::kSpeakers);
+
+ if (Context()->GetExecutionContext()) {
+ main_thread_task_runner_ = Context()->GetExecutionContext()->GetTaskRunner(
+ TaskType::kMiscPlatformAPI);
+ DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+ }
+}
+
+scoped_refptr<OfflineAudioDestinationHandler>
+OfflineAudioDestinationHandler::Create(AudioNode& node,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate) {
+ return base::AdoptRef(new OfflineAudioDestinationHandler(
+ node, number_of_channels, frames_to_process, sample_rate));
+}
+
+OfflineAudioDestinationHandler::~OfflineAudioDestinationHandler() {
+ DCHECK(!IsInitialized());
+}
+
+void OfflineAudioDestinationHandler::Dispose() {
+ Uninitialize();
+ AudioDestinationHandler::Dispose();
+}
+
+void OfflineAudioDestinationHandler::Initialize() {
+ if (IsInitialized())
+ return;
+
+ AudioHandler::Initialize();
+}
+
+void OfflineAudioDestinationHandler::Uninitialize() {
+ if (!IsInitialized())
+ return;
+
+ render_thread_.reset();
+
+ AudioHandler::Uninitialize();
+}
+
+OfflineAudioContext* OfflineAudioDestinationHandler::Context() const {
+ return static_cast<OfflineAudioContext*>(AudioDestinationHandler::Context());
+}
+
+unsigned long OfflineAudioDestinationHandler::MaxChannelCount() const {
+ return channel_count_;
+}
+
+void OfflineAudioDestinationHandler::StartRendering() {
+ DCHECK(IsMainThread());
+ DCHECK(render_target_);
+ DCHECK(render_thread_task_runner_);
+
+ // Rendering was not started. Starting now.
+ if (!is_rendering_started_) {
+ is_rendering_started_ = true;
+ PostCrossThreadTask(
+ *render_thread_task_runner_, FROM_HERE,
+ CrossThreadBind(&OfflineAudioDestinationHandler::StartOfflineRendering,
+ WrapRefCounted(this)));
+ return;
+ }
+
+ // Rendering is already started, which implicitly means we resume the
+ // rendering by calling |doOfflineRendering| on the render thread.
+ PostCrossThreadTask(
+ *render_thread_task_runner_, FROM_HERE,
+ CrossThreadBind(&OfflineAudioDestinationHandler::DoOfflineRendering,
+ WrapRefCounted(this)));
+}
+
+void OfflineAudioDestinationHandler::StopRendering() {
+ // offline audio rendering CANNOT BE stopped by JavaScript.
+ NOTREACHED();
+}
+
+size_t OfflineAudioDestinationHandler::CallbackBufferSize() const {
+ // The callback buffer size has no meaning for an offline context.
+ NOTREACHED();
+ return 0;
+}
+
+void OfflineAudioDestinationHandler::InitializeOfflineRenderThread(
+ AudioBuffer* render_target) {
+ DCHECK(IsMainThread());
+
+ render_target_ = render_target;
+ render_bus_ = AudioBus::Create(render_target->numberOfChannels(),
+ AudioUtilities::kRenderQuantumFrames);
+ DCHECK(render_bus_);
+
+ PrepareTaskRunnerForRendering();
+}
+
+void OfflineAudioDestinationHandler::StartOfflineRendering() {
+ DCHECK(!IsMainThread());
+
+ DCHECK(render_bus_);
+ if (!render_bus_)
+ return;
+
+ bool is_audio_context_initialized = Context()->IsDestinationInitialized();
+ DCHECK(is_audio_context_initialized);
+ if (!is_audio_context_initialized)
+ return;
+
+ bool channels_match =
+ render_bus_->NumberOfChannels() == render_target_->numberOfChannels();
+ DCHECK(channels_match);
+ if (!channels_match)
+ return;
+
+ bool is_render_bus_allocated =
+ render_bus_->length() >= AudioUtilities::kRenderQuantumFrames;
+ DCHECK(is_render_bus_allocated);
+ if (!is_render_bus_allocated)
+ return;
+
+ // Start rendering.
+ DoOfflineRendering();
+}
+
+void OfflineAudioDestinationHandler::DoOfflineRendering() {
+ DCHECK(!IsMainThread());
+
+ unsigned number_of_channels;
+ Vector<float*> destinations;
+ {
+ // Main thread GCs cannot happen while we're reading out channel
+ // data. Detect that condition by trying to take the cross-thread
+ // persistent lock which is held while a GC runs. If the lock is
+ // already held, simply delay rendering until the next quantum.
+ bool has_lock = ProcessHeap::CrossThreadPersistentMutex().TryLock();
+ if (!has_lock) {
+ // To ensure that the rendering step eventually happens, repost.
+ render_thread_task_runner_->PostTask(
+ FROM_HERE,
+ WTF::Bind(&OfflineAudioDestinationHandler::DoOfflineRendering,
+ WrapRefCounted(this)));
+ return;
+ }
+
+ number_of_channels = render_target_->numberOfChannels();
+ destinations.ReserveInitialCapacity(number_of_channels);
+ for (unsigned i = 0; i < number_of_channels; ++i)
+ destinations.push_back(render_target_->getChannelData(i).View()->Data());
+ ProcessHeap::CrossThreadPersistentMutex().unlock();
+ }
+
+ // If there is more to process and there is no suspension at the moment,
+ // do continue to render quanta. Then calling OfflineAudioContext.resume()
+ // will pick up the render loop again from where it was suspended.
+ while (frames_to_process_ > 0) {
+ // Suspend the rendering if a scheduled suspend found at the current
+ // sample frame. Otherwise render one quantum.
+ if (RenderIfNotSuspended(nullptr, render_bus_.get(),
+ AudioUtilities::kRenderQuantumFrames))
+ return;
+
+ size_t frames_available_to_copy =
+ std::min(frames_to_process_,
+ static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
+
+ for (unsigned channel_index = 0; channel_index < number_of_channels;
+ ++channel_index) {
+ const float* source = render_bus_->Channel(channel_index)->Data();
+ memcpy(destinations[channel_index] + frames_processed_, source,
+ sizeof(float) * frames_available_to_copy);
+ }
+
+ frames_processed_ += frames_available_to_copy;
+
+ DCHECK_GE(frames_to_process_, frames_available_to_copy);
+ frames_to_process_ -= frames_available_to_copy;
+ }
+
+ DCHECK_EQ(frames_to_process_, 0u);
+ FinishOfflineRendering();
+}
+
+void OfflineAudioDestinationHandler::SuspendOfflineRendering() {
+ DCHECK(!IsMainThread());
+
+ // The actual rendering has been suspended. Notify the context.
+ PostCrossThreadTask(
+ *main_thread_task_runner_, FROM_HERE,
+ CrossThreadBind(&OfflineAudioDestinationHandler::NotifySuspend,
+ WrapRefCounted(this), Context()->CurrentSampleFrame()));
+}
+
+void OfflineAudioDestinationHandler::FinishOfflineRendering() {
+ DCHECK(!IsMainThread());
+
+ // The actual rendering has been completed. Notify the context.
+ PostCrossThreadTask(
+ *main_thread_task_runner_, FROM_HERE,
+ CrossThreadBind(&OfflineAudioDestinationHandler::NotifyComplete,
+ WrapRefCounted(this)));
+}
+
+void OfflineAudioDestinationHandler::NotifySuspend(size_t frame) {
+ DCHECK(IsMainThread());
+
+ if (Context() && Context()->GetExecutionContext())
+ Context()->ResolveSuspendOnMainThread(frame);
+}
+
+void OfflineAudioDestinationHandler::NotifyComplete() {
+ DCHECK(IsMainThread());
+
+ render_thread_.reset();
+
+ // The OfflineAudioContext might be gone.
+ if (Context() && Context()->GetExecutionContext())
+ Context()->FireCompletionEvent();
+}
+
+bool OfflineAudioDestinationHandler::RenderIfNotSuspended(
+ AudioBus* source_bus,
+ AudioBus* destination_bus,
+ size_t number_of_frames) {
+ // We don't want denormals slowing down any of the audio processing
+ // since they can very seriously hurt performance.
+ // This will take care of all AudioNodes because they all process within this
+ // scope.
+ DenormalDisabler denormal_disabler;
+
+ // Need to check if the context actually alive. Otherwise the subsequent
+ // steps will fail. If the context is not alive somehow, return immediately
+ // and do nothing.
+ //
+ // TODO(hongchan): because the context can go away while rendering, so this
+ // check cannot guarantee the safe execution of the following steps.
+ DCHECK(Context());
+ if (!Context())
+ return false;
+
+ Context()->GetDeferredTaskHandler().SetAudioThreadToCurrentThread();
+
+ // If the destination node is not initialized, pass the silence to the final
+ // audio destination (one step before the FIFO). This check is for the case
+ // where the destination is in the middle of tearing down process.
+ if (!IsInitialized()) {
+ destination_bus->Zero();
+ return false;
+ }
+
+ // Take care pre-render tasks at the beginning of each render quantum. Then
+ // it will stop the rendering loop if the context needs to be suspended
+ // at the beginning of the next render quantum.
+ if (Context()->HandlePreOfflineRenderTasks()) {
+ SuspendOfflineRendering();
+ return true;
+ }
+
+ // Prepare the local audio input provider for this render quantum.
+ if (source_bus)
+ local_audio_input_provider_.Set(source_bus);
+
+ DCHECK_GE(NumberOfInputs(), 1u);
+ if (NumberOfInputs() < 1) {
+ destination_bus->Zero();
+ return false;
+ }
+ // This will cause the node(s) connected to us to process, which in turn will
+ // pull on their input(s), all the way backwards through the rendering graph.
+ AudioBus* rendered_bus = Input(0).Pull(destination_bus, number_of_frames);
+
+ if (!rendered_bus) {
+ destination_bus->Zero();
+ } else if (rendered_bus != destination_bus) {
+ // in-place processing was not possible - so copy
+ destination_bus->CopyFrom(*rendered_bus);
+ }
+
+ // Process nodes which need a little extra help because they are not connected
+ // to anything, but still need to process.
+ Context()->GetDeferredTaskHandler().ProcessAutomaticPullNodes(
+ number_of_frames);
+
+ // Let the context take care of any business at the end of each render
+ // quantum.
+ Context()->HandlePostOfflineRenderTasks();
+
+ // Advance current sample-frame.
+ size_t new_sample_frame = current_sample_frame_ + number_of_frames;
+ ReleaseStore(&current_sample_frame_, new_sample_frame);
+
+ Context()->UpdateWorkletGlobalScopeOnRenderingThread();
+
+ return false;
+}
+
+void OfflineAudioDestinationHandler::PrepareTaskRunnerForRendering() {
+ DCHECK(IsMainThread());
+
+ AudioWorklet* audio_worklet = Context()->audioWorklet();
+ if (audio_worklet && audio_worklet->IsReady()) {
+ if (!render_thread_) {
+ // The context (re)started with the AudioWorklet mode. Assign the task
+ // runner only when it is not set yet.
+ if (!render_thread_task_runner_) {
+ render_thread_task_runner_ =
+ audio_worklet->GetMessagingProxy()->GetBackingWorkerThread()
+ ->GetTaskRunner(TaskType::kMiscPlatformAPI);
+ }
+ } else {
+ // The AudioWorklet is activated and the render task runner should be
+ // changed.
+ render_thread_ = nullptr;
+ render_thread_task_runner_ =
+ audio_worklet->GetMessagingProxy()->GetBackingWorkerThread()
+ ->GetTaskRunner(TaskType::kMiscPlatformAPI);
+ }
+ } else {
+ if (!render_thread_) {
+ // The context started from the non-AudioWorklet mode.
+ render_thread_ = Platform::Current()->CreateThread(
+ WebThreadCreationParams(WebThreadType::kOfflineAudioRenderThread));
+ render_thread_task_runner_ = render_thread_->GetTaskRunner();
+ }
+ }
+
+ // The task runner MUST be valid at this point.
+ DCHECK(render_thread_task_runner_);
+}
+
+void OfflineAudioDestinationHandler::RestartRendering() {
+  DCHECK(IsMainThread());
+
+  // The rendering thread might have been changed, so we need to set up the
+  // task runner again.
+  PrepareTaskRunnerForRendering();
+}
+
+// ----------------------------------------------------------------
+
+OfflineAudioDestinationNode::OfflineAudioDestinationNode(
+ BaseAudioContext& context,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate)
+ : AudioDestinationNode(context) {
+ SetHandler(OfflineAudioDestinationHandler::Create(
+ *this, number_of_channels, frames_to_process, sample_rate));
+}
+
+OfflineAudioDestinationNode* OfflineAudioDestinationNode::Create(
+ BaseAudioContext* context,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate) {
+ return new OfflineAudioDestinationNode(*context, number_of_channels,
+ frames_to_process, sample_rate);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h
new file mode 100644
index 00000000000..1928524c9a1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_DESTINATION_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_DESTINATION_NODE_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/public/platform/web_thread.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_destination_node.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class AudioBus;
+class OfflineAudioContext;
+
+class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
+ public:
+ static scoped_refptr<OfflineAudioDestinationHandler> Create(
+ AudioNode&,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate);
+ ~OfflineAudioDestinationHandler() override;
+
+ // AudioHandler
+ void Dispose() override;
+ void Initialize() override;
+ void Uninitialize() override;
+
+ // AudioNode
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+
+ OfflineAudioContext* Context() const final;
+
+ // AudioDestinationHandler
+ void StartRendering() override;
+ void StopRendering() override;
+ unsigned long MaxChannelCount() const override;
+
+ void RestartRendering() override;
+
+ // Returns the rendering callback buffer size. This should never be
+ // called.
+ size_t CallbackBufferSize() const override;
+
+ double SampleRate() const override { return sample_rate_; }
+ int FramesPerBuffer() const override {
+ NOTREACHED();
+ return 0;
+ }
+
+ size_t RenderQuantumFrames() const {
+ return AudioUtilities::kRenderQuantumFrames;
+ }
+
+ // This is called when rendering of the offline context is started
+ // which will save the rendered audio data in |render_target|. This
+ // allows creation of the AudioBuffer when startRendering is called
+ // instead of when the OfflineAudioContext is created.
+ void InitializeOfflineRenderThread(AudioBuffer* render_target);
+ AudioBuffer* RenderTarget() const { return render_target_.Get(); }
+
+ unsigned NumberOfChannels() const { return number_of_channels_; }
+
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ OfflineAudioDestinationHandler(AudioNode&,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate);
+
+ // Set up the rendering and start. After setting the context up, it will
+ // eventually call |doOfflineRendering|.
+ void StartOfflineRendering();
+
+ // Suspend the rendering loop and notify the main thread to resolve the
+ // associated promise.
+ void SuspendOfflineRendering();
+
+ // Start the rendering loop.
+ void DoOfflineRendering();
+
+ // Finish the rendering loop and notify the main thread to resolve the
+ // promise with the rendered buffer.
+ void FinishOfflineRendering();
+
+ // Suspend/completion callbacks for the main thread.
+ void NotifySuspend(size_t);
+ void NotifyComplete();
+
+ // The offline version of render() method. If the rendering needs to be
+ // suspended after checking, this stops the rendering and returns true.
+ // Otherwise, it returns false after rendering one quantum.
+ bool RenderIfNotSuspended(AudioBus* source_bus,
+ AudioBus* destination_bus,
+ size_t number_of_frames);
+
+ // Prepares a task runner for the rendering based on the operation mode
+ // (i.e. non-AudioWorklet or AudioWorklet). This is called when the
+ // rendering restarts such as context.resume() after context.suspend().
+ // The only possible transition is from the non-AudioWorklet mode to the
+ // AudioWorklet mode. Once the AudioWorklet mode is activated, the task runner
+ // from AudioWorkletThread will be used until the rendering is finished.
+ void PrepareTaskRunnerForRendering();
+
+ // This AudioHandler renders into this AudioBuffer.
+ // This Persistent doesn't make a reference cycle including the owner
+ // OfflineAudioDestinationNode. It is accessed by both audio and main thread.
+ CrossThreadPersistent<AudioBuffer> render_target_;
+ // Temporary AudioBus for each render quantum.
+ scoped_refptr<AudioBus> render_bus_;
+
+ // These variables are for counting the number of frames for the current
+ // progress and the remaining frames to be processed.
+ size_t frames_processed_;
+ size_t frames_to_process_;
+
+ // This flag is necessary to distinguish the state of the context between
+ // 'created' and 'suspended'. If this flag is false and the current state
+ // is 'suspended', it means the context is created and have not started yet.
+ bool is_rendering_started_;
+
+ unsigned number_of_channels_;
+ float sample_rate_;
+
+ // The rendering thread for the non-AudioWorklet mode. For the AudioWorklet
+ // node, AudioWorkletThread will drive the rendering.
+ std::unique_ptr<WebThread> render_thread_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> render_thread_task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
+};
+
+class OfflineAudioDestinationNode final : public AudioDestinationNode {
+ public:
+ static OfflineAudioDestinationNode* Create(BaseAudioContext*,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate);
+
+ private:
+ OfflineAudioDestinationNode(BaseAudioContext&,
+ unsigned number_of_channels,
+ size_t frames_to_process,
+ float sample_rate);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OFFLINE_AUDIO_DESTINATION_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc
new file mode 100644
index 00000000000..d09ed132b7b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.cc
@@ -0,0 +1,558 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <algorithm>
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/oscillator_node.h"
+#include "third_party/blink/renderer/modules/webaudio/periodic_wave.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/vector_math.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+#include "third_party/blink/renderer/platform/wtf/std_lib_extras.h"
+
+namespace blink {
+
+using namespace VectorMath;
+
+OscillatorHandler::OscillatorHandler(AudioNode& node,
+ float sample_rate,
+ const String& oscillator_type,
+ PeriodicWave* wave_table,
+ AudioParamHandler& frequency,
+ AudioParamHandler& detune)
+ : AudioScheduledSourceHandler(kNodeTypeOscillator, node, sample_rate),
+ frequency_(&frequency),
+ detune_(&detune),
+ first_render_(true),
+ virtual_read_index_(0),
+ phase_increments_(AudioUtilities::kRenderQuantumFrames),
+ detune_values_(AudioUtilities::kRenderQuantumFrames) {
+ if (wave_table) {
+ // A PeriodicWave overrides any value for the oscillator type,
+ // forcing the type to be 'custom".
+ SetPeriodicWave(wave_table);
+ } else {
+ if (oscillator_type == "sine")
+ SetType(SINE);
+ else if (oscillator_type == "square")
+ SetType(SQUARE);
+ else if (oscillator_type == "sawtooth")
+ SetType(SAWTOOTH);
+ else if (oscillator_type == "triangle")
+ SetType(TRIANGLE);
+ else
+ NOTREACHED();
+ }
+
+ // An oscillator is always mono.
+ AddOutput(1);
+
+ Initialize();
+}
+
+scoped_refptr<OscillatorHandler> OscillatorHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ const String& oscillator_type,
+ PeriodicWave* wave_table,
+ AudioParamHandler& frequency,
+ AudioParamHandler& detune) {
+ return base::AdoptRef(new OscillatorHandler(
+ node, sample_rate, oscillator_type, wave_table, frequency, detune));
+}
+
+OscillatorHandler::~OscillatorHandler() {
+ Uninitialize();
+}
+
+String OscillatorHandler::GetType() const {
+ switch (type_) {
+ case SINE:
+ return "sine";
+ case SQUARE:
+ return "square";
+ case SAWTOOTH:
+ return "sawtooth";
+ case TRIANGLE:
+ return "triangle";
+ case CUSTOM:
+ return "custom";
+ default:
+ NOTREACHED();
+ return "custom";
+ }
+}
+
+void OscillatorHandler::SetType(const String& type,
+ ExceptionState& exception_state) {
+ if (type == "sine") {
+ SetType(SINE);
+ } else if (type == "square") {
+ SetType(SQUARE);
+ } else if (type == "sawtooth") {
+ SetType(SAWTOOTH);
+ } else if (type == "triangle") {
+ SetType(TRIANGLE);
+ } else if (type == "custom") {
+ exception_state.ThrowDOMException(kInvalidStateError,
+ "'type' cannot be set directly to "
+ "'custom'. Use setPeriodicWave() to "
+ "create a custom Oscillator type.");
+ }
+}
+
+bool OscillatorHandler::SetType(unsigned type) {
+ PeriodicWave* periodic_wave = nullptr;
+
+ switch (type) {
+ case SINE:
+ periodic_wave = Context()->GetPeriodicWave(SINE);
+ break;
+ case SQUARE:
+ periodic_wave = Context()->GetPeriodicWave(SQUARE);
+ break;
+ case SAWTOOTH:
+ periodic_wave = Context()->GetPeriodicWave(SAWTOOTH);
+ break;
+ case TRIANGLE:
+ periodic_wave = Context()->GetPeriodicWave(TRIANGLE);
+ break;
+ case CUSTOM:
+ default:
+ // Return false for invalid types, including CUSTOM since
+ // setPeriodicWave() method must be called explicitly.
+ NOTREACHED();
+ return false;
+ }
+
+ SetPeriodicWave(periodic_wave);
+ type_ = type;
+ return true;
+}
+
+bool OscillatorHandler::CalculateSampleAccuratePhaseIncrements(
+ size_t frames_to_process) {
+ bool is_good = frames_to_process <= phase_increments_.size() &&
+ frames_to_process <= detune_values_.size();
+ DCHECK(is_good);
+ if (!is_good)
+ return false;
+
+ if (first_render_) {
+ first_render_ = false;
+ frequency_->ResetSmoothedValue();
+ detune_->ResetSmoothedValue();
+ }
+
+ bool has_sample_accurate_values = false;
+ bool has_frequency_changes = false;
+ float* phase_increments = phase_increments_.Data();
+
+ float final_scale = periodic_wave_->RateScale();
+
+ if (frequency_->HasSampleAccurateValues()) {
+ has_sample_accurate_values = true;
+ has_frequency_changes = true;
+
+ // Get the sample-accurate frequency values and convert to phase increments.
+ // They will be converted to phase increments below.
+ frequency_->CalculateSampleAccurateValues(phase_increments,
+ frames_to_process);
+ } else {
+ // Handle ordinary parameter changes if there are no scheduled changes.
+ float frequency = frequency_->Value();
+ final_scale *= frequency;
+ }
+
+ if (detune_->HasSampleAccurateValues()) {
+ has_sample_accurate_values = true;
+
+ // Get the sample-accurate detune values.
+ float* detune_values =
+ has_frequency_changes ? detune_values_.Data() : phase_increments;
+ detune_->CalculateSampleAccurateValues(detune_values, frames_to_process);
+
+ // Convert from cents to rate scalar.
+ float k = 1.0 / 1200;
+ Vsmul(detune_values, 1, &k, detune_values, 1, frames_to_process);
+ for (unsigned i = 0; i < frames_to_process; ++i)
+ detune_values[i] = powf(
+ 2, detune_values[i]); // FIXME: converting to expf() will be faster.
+
+ if (has_frequency_changes) {
+ // Multiply frequencies by detune scalings.
+ Vmul(detune_values, 1, phase_increments, 1, phase_increments, 1,
+ frames_to_process);
+ }
+ } else {
+ // Handle ordinary parameter changes if there are no scheduled
+ // changes.
+ float detune = detune_->Value();
+ float detune_scale = powf(2, detune / 1200);
+ final_scale *= detune_scale;
+ }
+
+ if (has_sample_accurate_values) {
+ // Convert from frequency to wavetable increment.
+ Vsmul(phase_increments, 1, &final_scale, phase_increments, 1,
+ frames_to_process);
+ }
+
+ return has_sample_accurate_values;
+}
+
+static float DoInterpolation(double virtual_read_index,
+ float incr,
+ unsigned read_index_mask,
+ float table_interpolation_factor,
+ const float* lower_wave_data,
+ const float* higher_wave_data) {
+ DCHECK_GE(incr, 0);
+
+ double sample_lower = 0;
+ double sample_higher = 0;
+
+ unsigned read_index_0 = static_cast<unsigned>(virtual_read_index);
+
+ // Consider a typical sample rate of 44100 Hz and max periodic wave
+ // size of 4096. The relationship between |incr| and the frequency
+ // of the oscillator is |incr| = freq * 4096/44100. Or freq =
+ // |incr|*44100/4096 = 10.8*|incr|.
+ //
+ // For the |incr| thresholds below, this means that we use linear
+ // interpolation for all freq >= 3.2 Hz, 3-point Lagrange
+ // for freq >= 1.7 Hz and 5-point Lagrange for every thing else.
+ //
+ // We use Lagrange interpolation because it's relatively simple to
+ // implement and fairly inexpensive, and the interpolator always
+ // passes through known points.
+ if (incr >= 0.3) {
+ // Increment is fairly large, so we're doing no more than about 3
+ // points between each wave table entry. Assume linear
+ // interpolation between points is good enough.
+ unsigned read_index2 = read_index_0 + 1;
+
+ // Contain within valid range.
+ read_index_0 = read_index_0 & read_index_mask;
+ read_index2 = read_index2 & read_index_mask;
+
+ float sample1_lower = lower_wave_data[read_index_0];
+ float sample2_lower = lower_wave_data[read_index2];
+ float sample1_higher = higher_wave_data[read_index_0];
+ float sample2_higher = higher_wave_data[read_index2];
+
+ // Linearly interpolate within each table (lower and higher).
+ double interpolation_factor =
+ static_cast<float>(virtual_read_index) - read_index_0;
+ sample_higher = (1 - interpolation_factor) * sample1_higher +
+ interpolation_factor * sample2_higher;
+ sample_lower = (1 - interpolation_factor) * sample1_lower +
+ interpolation_factor * sample2_lower;
+
+ } else if (incr >= .16) {
+ // We're doing about 6 interpolation values between each wave
+ // table sample. Just use a 3-point Lagrange interpolator to get a
+ // better estimate than just linear.
+ //
+ // See 3-point formula in http://dlmf.nist.gov/3.3#ii
+ unsigned read_index[3];
+
+ for (int k = -1; k <= 1; ++k) {
+ read_index[k + 1] = (read_index_0 + k) & read_index_mask;
+ }
+
+ double a[3];
+ double t = virtual_read_index - read_index_0;
+
+ a[0] = 0.5 * t * (t - 1);
+ a[1] = 1 - t * t;
+ a[2] = 0.5 * t * (t + 1);
+
+ for (int k = 0; k < 3; ++k) {
+ sample_lower += a[k] * lower_wave_data[read_index[k]];
+ sample_higher += a[k] * higher_wave_data[read_index[k]];
+ }
+ } else {
+ // For everything else (more than 6 points per entry), we'll do a
+ // 5-point Lagrange interpolator. This is a trade-off between
+ // quality and speed.
+ //
+ // See 5-point formula in http://dlmf.nist.gov/3.3#ii
+ unsigned read_index[5];
+ for (int k = -2; k <= 2; ++k) {
+ read_index[k + 2] = (read_index_0 + k) & read_index_mask;
+ }
+
+ double a[5];
+ double t = virtual_read_index - read_index_0;
+ double t2 = t * t;
+
+ a[0] = t * (t2 - 1) * (t - 2) / 24;
+ a[1] = -t * (t - 1) * (t2 - 4) / 6;
+ a[2] = (t2 - 1) * (t2 - 4) / 4;
+ a[3] = -t * (t + 1) * (t2 - 4) / 6;
+ a[4] = t * (t2 - 1) * (t + 2) / 24;
+
+ for (int k = 0; k < 5; ++k) {
+ sample_lower += a[k] * lower_wave_data[read_index[k]];
+ sample_higher += a[k] * higher_wave_data[read_index[k]];
+ }
+ }
+
+ // Then interpolate between the two tables.
+ float sample = (1 - table_interpolation_factor) * sample_higher +
+ table_interpolation_factor * sample_lower;
+ return sample;
+}
+
+// Renders one render quantum of the periodic waveform into channel 0 of the
+// output bus.  Runs on the audio thread and must never block: if the process
+// lock is contended (a wavetable change is in flight) the quantum is
+// rendered as silence instead.
+void OscillatorHandler::Process(size_t frames_to_process) {
+  AudioBus* output_bus = Output(0).Bus();
+
+  if (!IsInitialized() || !output_bus->NumberOfChannels()) {
+    output_bus->Zero();
+    return;
+  }
+
+  // phase_increments_ holds one value per frame; a larger request would run
+  // off the end of that buffer, so bail out defensively in release builds.
+  DCHECK_LE(frames_to_process, phase_increments_.size());
+  if (frames_to_process > phase_increments_.size())
+    return;
+
+  // The audio thread can't block on this lock, so we call tryLock() instead.
+  MutexTryLocker try_locker(process_lock_);
+  if (!try_locker.Locked()) {
+    // Too bad - the tryLock() failed. We must be in the middle of changing
+    // wave-tables.
+    output_bus->Zero();
+    return;
+  }
+
+  // We must access periodic_wave_ only inside the lock.
+  if (!periodic_wave_.Get()) {
+    output_bus->Zero();
+    return;
+  }
+
+  size_t quantum_frame_offset;
+  size_t non_silent_frames_to_process;
+  double start_frame_offset;
+
+  // Work out which portion of this quantum is audible given the node's
+  // scheduled start/stop times.
+  UpdateSchedulingInfo(frames_to_process, output_bus, quantum_frame_offset,
+                       non_silent_frames_to_process, start_frame_offset);
+
+  if (!non_silent_frames_to_process) {
+    output_bus->Zero();
+    return;
+  }
+
+  unsigned periodic_wave_size = periodic_wave_->PeriodicWaveSize();
+  double inv_periodic_wave_size = 1.0 / periodic_wave_size;
+
+  float* dest_p = output_bus->Channel(0)->MutableData();
+
+  DCHECK_LE(quantum_frame_offset, frames_to_process);
+
+  // We keep virtualReadIndex double-precision since we're accumulating values.
+  double virtual_read_index = virtual_read_index_;
+
+  float rate_scale = periodic_wave_->RateScale();
+  float inv_rate_scale = 1 / rate_scale;
+  // True when frequency/detune carry timeline automation and must be
+  // re-evaluated every frame instead of once per quantum.
+  bool has_sample_accurate_values =
+      CalculateSampleAccuratePhaseIncrements(frames_to_process);
+
+  float frequency = 0;
+  float* higher_wave_data = nullptr;
+  float* lower_wave_data = nullptr;
+  float table_interpolation_factor = 0;
+
+  if (!has_sample_accurate_values) {
+    // k-rate path: fold detune (in cents, hence 2^(cents/1200)) into the
+    // frequency once, then pick the pair of wavetables bracketing it.
+    frequency = frequency_->Value();
+    float detune = detune_->Value();
+    float detune_scale = powf(2, detune / 1200);
+    frequency *= detune_scale;
+    periodic_wave_->WaveDataForFundamentalFrequency(frequency, lower_wave_data,
+                                                    higher_wave_data,
+                                                    table_interpolation_factor);
+  }
+
+  float incr = frequency * rate_scale;
+  float* phase_increments = phase_increments_.Data();
+
+  // Table reads are wrapped by masking, which assumes the wave size is a
+  // power of two.
+  unsigned read_index_mask = periodic_wave_size - 1;
+
+  // Start rendering at the correct offset.
+  dest_p += quantum_frame_offset;
+  int n = non_silent_frames_to_process;
+
+  // If startFrameOffset is not 0, that means the oscillator doesn't actually
+  // start at quantumFrameOffset, but just past that time. Adjust destP and n
+  // to reflect that, and adjust virtualReadIndex to start the value at
+  // startFrameOffset.
+  if (start_frame_offset > 0) {
+    ++dest_p;
+    --n;
+    virtual_read_index += (1 - start_frame_offset) * frequency * rate_scale;
+    DCHECK(virtual_read_index < periodic_wave_size);
+  } else if (start_frame_offset < 0) {
+    virtual_read_index = -start_frame_offset * frequency * rate_scale;
+  }
+
+  while (n--) {
+    if (has_sample_accurate_values) {
+      // a-rate path: every frame carries its own phase increment; recover
+      // the frequency from it to select the matching wavetables.
+      incr = *phase_increments++;
+
+      frequency = inv_rate_scale * incr;
+      periodic_wave_->WaveDataForFundamentalFrequency(
+          frequency, lower_wave_data, higher_wave_data,
+          table_interpolation_factor);
+    }
+
+    float sample = DoInterpolation(virtual_read_index, fabs(incr),
+                                   read_index_mask, table_interpolation_factor,
+                                   lower_wave_data, higher_wave_data);
+
+    *dest_p++ = sample;
+
+    // Increment virtual read index and wrap virtualReadIndex into the range
+    // 0 -> periodicWaveSize.
+    virtual_read_index += incr;
+    virtual_read_index -=
+        floor(virtual_read_index * inv_periodic_wave_size) * periodic_wave_size;
+  }
+
+  virtual_read_index_ = virtual_read_index;
+
+  output_bus->ClearSilentFlag();
+}
+
+// Installs a caller-provided wavetable and switches the oscillator type to
+// CUSTOM.  Main thread only; takes the process lock so the audio thread
+// never observes a half-updated wave/type pair.
+void OscillatorHandler::SetPeriodicWave(PeriodicWave* periodic_wave) {
+  DCHECK(IsMainThread());
+  DCHECK(periodic_wave);
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  periodic_wave_ = periodic_wave;
+  type_ = CUSTOM;
+}
+
+// The node propagates silence (so downstream processing can be skipped)
+// unless it is currently playing/scheduled and has a wavetable to read.
+bool OscillatorHandler::PropagatesSilence() const {
+  return !IsPlayingOrScheduled() || HasFinished() || !periodic_wave_.Get();
+}
+
+// ----------------------------------------------------------------
+
+// Constructs the JS-visible node: creates the frequency/detune AudioParams
+// and hands their handlers to a new OscillatorHandler, which does the actual
+// rendering on the audio thread.
+OscillatorNode::OscillatorNode(BaseAudioContext& context,
+                               const String& oscillator_type,
+                               PeriodicWave* wave_table)
+    : AudioScheduledSourceNode(context),
+      // Use musical pitch standard A440 as a default.  Frequency is clamped
+      // to +/- Nyquist (half the context sample rate).
+      frequency_(AudioParam::Create(context,
+                                    kParamTypeOscillatorFrequency,
+                                    "Oscillator.frequency",
+                                    440,
+                                    -context.sampleRate() / 2,
+                                    context.sampleRate() / 2)),
+      // Default to no detuning.
+      detune_(AudioParam::Create(context,
+                                 kParamTypeOscillatorDetune,
+                                 "Oscillator.detune",
+                                 0)) {
+  SetHandler(OscillatorHandler::Create(
+      *this, context.sampleRate(), oscillator_type, wave_table,
+      frequency_->Handler(), detune_->Handler()));
+}
+
+// Factory used both by the IDL constructor path and internally.  Returns
+// nullptr (with an exception recorded on |exception_state|) if the context
+// has already been closed.
+OscillatorNode* OscillatorNode::Create(BaseAudioContext& context,
+                                       const String& oscillator_type,
+                                       PeriodicWave* wave_table,
+                                       ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (context.IsContextClosed()) {
+    context.ThrowExceptionForClosedState(exception_state);
+    return nullptr;
+  }
+
+  return new OscillatorNode(context, oscillator_type, wave_table);
+}
+
+// Dictionary-constructor factory (new OscillatorNode(ctx, options)).  Per
+// the Web Audio spec, type "custom" is only valid together with an explicit
+// periodicWave; that combination is validated here before construction.
+OscillatorNode* OscillatorNode::Create(BaseAudioContext* context,
+                                       const OscillatorOptions& options,
+                                       ExceptionState& exception_state) {
+  if (options.type() == "custom" && !options.hasPeriodicWave()) {
+    exception_state.ThrowDOMException(
+        kInvalidStateError,
+        "A PeriodicWave must be specified if the type is set to \"custom\"");
+    return nullptr;
+  }
+
+  OscillatorNode* node =
+      Create(*context, options.type(), options.periodicWave(), exception_state);
+
+  if (!node)
+    return nullptr;
+
+  // Apply the channelCount/channelCountMode/channelInterpretation options,
+  // then the oscillator-specific param defaults from the dictionary.
+  node->HandleChannelOptions(options, exception_state);
+
+  node->detune()->setValue(options.detune());
+  node->frequency()->setValue(options.frequency());
+
+  return node;
+}
+
+// GC tracing for the Oilpan-managed AudioParam members.
+void OscillatorNode::Trace(blink::Visitor* visitor) {
+  visitor->Trace(frequency_);
+  visitor->Trace(detune_);
+  AudioScheduledSourceNode::Trace(visitor);
+}
+
+// Downcast helper: the handler installed in the constructor is always an
+// OscillatorHandler, so the static_cast is safe.
+OscillatorHandler& OscillatorNode::GetOscillatorHandler() const {
+  return static_cast<OscillatorHandler&>(Handler());
+}
+
+// IDL attribute "type" — delegates to the handler.
+String OscillatorNode::type() const {
+  return GetOscillatorHandler().GetType();
+}
+
+// IDL attribute setter; the handler reports invalid transitions through
+// |exception_state|.
+void OscillatorNode::setType(const String& type,
+                             ExceptionState& exception_state) {
+  GetOscillatorHandler().SetType(type, exception_state);
+}
+
+// IDL readonly attribute "frequency" (Hz).
+AudioParam* OscillatorNode::frequency() {
+  return frequency_;
+}
+
+// IDL readonly attribute "detune" (cents).
+AudioParam* OscillatorNode::detune() {
+  return detune_;
+}
+
+// IDL setPeriodicWave(); switches the handler to the custom waveform.
+void OscillatorNode::setPeriodicWave(PeriodicWave* wave) {
+  GetOscillatorHandler().SetPeriodicWave(wave);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h
new file mode 100644
index 00000000000..4ba37848e85
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OSCILLATOR_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OSCILLATOR_NODE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_scheduled_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/oscillator_options.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+class OscillatorOptions;
+class PeriodicWave;
+
+// OscillatorNode is an audio generator of periodic waveforms.
+
+// Audio-thread side of OscillatorNode: owns the rendering state and produces
+// the waveform in Process().  Ref-counted so the audio thread can keep it
+// alive independently of the GC-managed node.
+class OscillatorHandler final : public AudioScheduledSourceHandler {
+ public:
+  // The waveform type.
+  // These must be defined as in the .idl file.
+  enum { SINE = 0, SQUARE = 1, SAWTOOTH = 2, TRIANGLE = 3, CUSTOM = 4 };
+
+  static scoped_refptr<OscillatorHandler> Create(AudioNode&,
+                                                 float sample_rate,
+                                                 const String& oscillator_type,
+                                                 PeriodicWave* wave_table,
+                                                 AudioParamHandler& frequency,
+                                                 AudioParamHandler& detune);
+  ~OscillatorHandler() override;
+
+  // AudioHandler
+  void Process(size_t frames_to_process) override;
+
+  String GetType() const;
+  void SetType(const String&, ExceptionState&);
+
+  // Installs a custom wavetable; switches type_ to CUSTOM.
+  void SetPeriodicWave(PeriodicWave*);
+
+ private:
+  OscillatorHandler(AudioNode&,
+                    float sample_rate,
+                    const String& oscillator_type,
+                    PeriodicWave* wave_table,
+                    AudioParamHandler& frequency,
+                    AudioParamHandler& detune);
+  bool SetType(unsigned);  // Returns true on success.
+
+  // Returns true if there are sample-accurate timeline parameter changes.
+  bool CalculateSampleAccuratePhaseIncrements(size_t frames_to_process);
+
+  bool PropagatesSilence() const override;
+
+  // One of the waveform types defined in the enum.
+  unsigned short type_;
+
+  // Frequency value in Hertz.
+  scoped_refptr<AudioParamHandler> frequency_;
+
+  // Detune value (deviating from the frequency) in Cents.
+  scoped_refptr<AudioParamHandler> detune_;
+
+  // NOTE(review): presumably tracks whether the first render quantum has
+  // been produced yet — confirm against the .cc definition.
+  bool first_render_;
+
+  // virtual_read_index_ is a sample-frame index into our buffer representing
+  // the current playback position. Since it's floating-point, it has
+  // sub-sample accuracy.
+  double virtual_read_index_;
+
+  // Stores sample-accurate values calculated according to frequency and detune.
+  AudioFloatArray phase_increments_;
+  AudioFloatArray detune_values_;
+
+  // This Persistent doesn't make a reference cycle including the owner
+  // OscillatorNode. It is cross-thread, as it will be accessed by the audio
+  // thread.
+  CrossThreadPersistent<PeriodicWave> periodic_wave_;
+};
+
+// Main-thread, GC-managed wrapper exposed to JavaScript; all DSP lives in
+// the OscillatorHandler it creates.
+class OscillatorNode final : public AudioScheduledSourceNode {
+  DEFINE_WRAPPERTYPEINFO();
+
+ public:
+  // Factory for internal callers; rejects closed contexts.
+  static OscillatorNode* Create(BaseAudioContext&,
+                                const String& oscillator_type,
+                                PeriodicWave* wave_table,
+                                ExceptionState&);
+  // Factory for the IDL dictionary constructor.
+  static OscillatorNode* Create(BaseAudioContext*,
+                                const OscillatorOptions&,
+                                ExceptionState&);
+  virtual void Trace(blink::Visitor*);
+
+  // IDL attributes/operations (see oscillator_node.idl).
+  String type() const;
+  void setType(const String&, ExceptionState&);
+  AudioParam* frequency();
+  AudioParam* detune();
+  void setPeriodicWave(PeriodicWave*);
+
+ private:
+  OscillatorNode(BaseAudioContext&,
+                 const String& oscillator_type,
+                 PeriodicWave* wave_table);
+  OscillatorHandler& GetOscillatorHandler() const;
+
+  Member<AudioParam> frequency_;
+  Member<AudioParam> detune_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_OSCILLATOR_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.idl
new file mode 100644
index 00000000000..d5cf7b82b5c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_node.idl
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#oscillatornode
+// Waveform shapes selectable via OscillatorNode.type; "custom" is the value
+// used when a PeriodicWave has been installed.
+enum OscillatorType {
+    "sine",
+    "square",
+    "sawtooth",
+    "triangle",
+    "custom"
+};
+
+// OscillatorNode is an audio generator of periodic waveforms.
+[
+    Constructor(BaseAudioContext context, optional OscillatorOptions options),
+    RaisesException=Constructor,
+    Measure
+]
+interface OscillatorNode : AudioScheduledSourceNode {
+
+    [RaisesException=Setter] attribute OscillatorType type;
+
+    readonly attribute AudioParam frequency; // in Hertz
+    readonly attribute AudioParam detune; // in Cents
+
+    // Installs a custom waveform; the implementation also switches the
+    // "type" attribute to "custom".
+    void setPeriodicWave(PeriodicWave periodicWave);
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/oscillator_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_options.idl
new file mode 100644
index 00000000000..d9e7ef88ba7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/oscillator_options.idl
@@ -0,0 +1,11 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-oscillatoroptions
+// Constructor options mirroring OscillatorNode's attributes; type "custom"
+// additionally requires periodicWave (enforced in OscillatorNode::Create).
+dictionary OscillatorOptions : AudioNodeOptions {
+    OscillatorType type = "sine";
+    float detune = 0;
+    float frequency = 440;
+    PeriodicWave? periodicWave;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc
new file mode 100644
index 00000000000..1ea44c19d57
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.cc
@@ -0,0 +1,848 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer_source_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/panner_node.h"
+#include "third_party/blink/renderer/modules/webaudio/panner_options.h"
+#include "third_party/blink/renderer/platform/audio/hrtf_panner.h"
+#include "third_party/blink/renderer/platform/histogram.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+// Flushes NaN and +/-infinity to 0 in place so degenerate geometry never
+// feeds illegal azimuth/elevation values into the panner math.
+static void FixNANs(double& x) {
+  if (std::isnan(x) || std::isinf(x))
+    x = 0.0;
+}
+
+// Constructs the handler with spec defaults: inverse distance model,
+// equal-power panning, stereo clamped-max output, and the cached
+// azimuth/elevation/cone-gain values marked dirty so they are recomputed on
+// first use.
+PannerHandler::PannerHandler(AudioNode& node,
+                             float sample_rate,
+                             AudioParamHandler& position_x,
+                             AudioParamHandler& position_y,
+                             AudioParamHandler& position_z,
+                             AudioParamHandler& orientation_x,
+                             AudioParamHandler& orientation_y,
+                             AudioParamHandler& orientation_z)
+    : AudioHandler(kNodeTypePanner, node, sample_rate),
+      listener_(node.context()->listener()),
+      distance_model_(DistanceEffect::kModelInverse),
+      is_azimuth_elevation_dirty_(true),
+      is_distance_cone_gain_dirty_(true),
+      cached_azimuth_(0),
+      cached_elevation_(0),
+      cached_distance_cone_gain_(1.0f),
+      position_x_(&position_x),
+      position_y_(&position_y),
+      position_z_(&position_z),
+      orientation_x_(&orientation_x),
+      orientation_y_(&orientation_y),
+      orientation_z_(&orientation_z) {
+  AddInput();
+  AddOutput(2);
+
+  // Node-specific default mixing rules.
+  channel_count_ = 2;
+  SetInternalChannelCountMode(kClampedMax);
+  SetInternalChannelInterpretation(AudioBus::kSpeakers);
+
+  // Explicitly set the default panning model here so that the histograms
+  // include the default value.
+  SetPanningModel("equalpower");
+
+  Initialize();
+}
+
+// Creates a ref-counted handler wrapping the six position/orientation
+// AudioParam handlers supplied by the owning PannerNode.
+scoped_refptr<PannerHandler> PannerHandler::Create(
+    AudioNode& node,
+    float sample_rate,
+    AudioParamHandler& position_x,
+    AudioParamHandler& position_y,
+    AudioParamHandler& position_z,
+    AudioParamHandler& orientation_x,
+    AudioParamHandler& orientation_y,
+    AudioParamHandler& orientation_z) {
+  return base::AdoptRef(new PannerHandler(node, sample_rate, position_x,
+                                          position_y, position_z, orientation_x,
+                                          orientation_y, orientation_z));
+}
+
+// Tears down the Panner and deregisters from the listener via Uninitialize().
+PannerHandler::~PannerHandler() {
+  Uninitialize();
+}
+
+// Spatializes the input bus into the output bus on the audio thread.  Both
+// the panner's own state lock and the listener's lock must be acquired
+// without blocking; if either is contended the quantum is rendered silent.
+void PannerHandler::Process(size_t frames_to_process) {
+  AudioBus* destination = Output(0).Bus();
+
+  if (!IsInitialized() || !panner_.get()) {
+    destination->Zero();
+    return;
+  }
+
+  AudioBus* source = Input(0).Bus();
+  if (!source) {
+    destination->Zero();
+    return;
+  }
+
+  // The audio thread can't block on this lock, so we call tryLock() instead.
+  MutexTryLocker try_locker(process_lock_);
+  MutexTryLocker try_listener_locker(Listener()->ListenerLock());
+
+  if (try_locker.Locked() && try_listener_locker.Locked()) {
+    if (!Context()->HasRealtimeConstraint() &&
+        panning_model_ == Panner::kPanningModelHRTF) {
+      // For an OfflineAudioContext, we need to make sure the HRTFDatabase
+      // is loaded before proceeding. For realtime contexts, we don't
+      // have to wait. The HRTF panner handles that case itself.
+      Listener()->WaitForHRTFDatabaseLoaderThreadCompletion();
+    }
+
+    if (HasSampleAccurateValues() || Listener()->HasSampleAccurateValues()) {
+      // It's tempting to skip sample-accurate processing if
+      // isAzimuthElevationDirty() and isDistanceConeGain() both return false.
+      // But in general we can't because something may be scheduled to start in
+      // the middle of the rendering quantum. On the other hand, the audible
+      // effect may be small enough that we can afford to do this optimization.
+      ProcessSampleAccurateValues(destination, source, frames_to_process);
+    } else {
+      // Apply the panning effect.
+      double azimuth;
+      double elevation;
+
+      // Update dirty state in case something has moved; this can happen if the
+      // AudioParam for the position or orientation component is set directly.
+      UpdateDirtyState();
+
+      AzimuthElevation(&azimuth, &elevation);
+
+      panner_->Pan(azimuth, elevation, source, destination, frames_to_process,
+                   InternalChannelInterpretation());
+
+      // Get the distance and cone gain.
+      float total_gain = DistanceConeGain();
+
+      // Apply gain in-place.
+      destination->CopyWithGainFrom(*destination, total_gain);
+    }
+  } else {
+    // Too bad - The tryLock() failed. We must be in the middle of changing the
+    // properties of the panner or the listener.
+    destination->Zero();
+  }
+}
+
+// a-rate panning path: evaluates the panner and listener AudioParams for
+// every frame of the quantum, computes per-frame azimuth/elevation and
+// distance/cone gain, then applies both with the sample-accurate panner API.
+void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
+                                                const AudioBus* source,
+                                                size_t frames_to_process) {
+  CHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+  // Get the sample accurate values from all of the AudioParams, including the
+  // values from the AudioListener.
+  float panner_x[AudioUtilities::kRenderQuantumFrames];
+  float panner_y[AudioUtilities::kRenderQuantumFrames];
+  float panner_z[AudioUtilities::kRenderQuantumFrames];
+
+  float orientation_x[AudioUtilities::kRenderQuantumFrames];
+  float orientation_y[AudioUtilities::kRenderQuantumFrames];
+  float orientation_z[AudioUtilities::kRenderQuantumFrames];
+
+  position_x_->CalculateSampleAccurateValues(panner_x, frames_to_process);
+  position_y_->CalculateSampleAccurateValues(panner_y, frames_to_process);
+  position_z_->CalculateSampleAccurateValues(panner_z, frames_to_process);
+  orientation_x_->CalculateSampleAccurateValues(orientation_x,
+                                                frames_to_process);
+  orientation_y_->CalculateSampleAccurateValues(orientation_y,
+                                                frames_to_process);
+  orientation_z_->CalculateSampleAccurateValues(orientation_z,
+                                                frames_to_process);
+
+  // Get the automation values from the listener.
+  const float* listener_x =
+      Listener()->GetPositionXValues(AudioUtilities::kRenderQuantumFrames);
+  const float* listener_y =
+      Listener()->GetPositionYValues(AudioUtilities::kRenderQuantumFrames);
+  const float* listener_z =
+      Listener()->GetPositionZValues(AudioUtilities::kRenderQuantumFrames);
+
+  const float* forward_x =
+      Listener()->GetForwardXValues(AudioUtilities::kRenderQuantumFrames);
+  const float* forward_y =
+      Listener()->GetForwardYValues(AudioUtilities::kRenderQuantumFrames);
+  const float* forward_z =
+      Listener()->GetForwardZValues(AudioUtilities::kRenderQuantumFrames);
+
+  const float* up_x =
+      Listener()->GetUpXValues(AudioUtilities::kRenderQuantumFrames);
+  const float* up_y =
+      Listener()->GetUpYValues(AudioUtilities::kRenderQuantumFrames);
+  const float* up_z =
+      Listener()->GetUpZValues(AudioUtilities::kRenderQuantumFrames);
+
+  // Compute the azimuth, elevation, and total gains for each position.
+  double azimuth[AudioUtilities::kRenderQuantumFrames];
+  double elevation[AudioUtilities::kRenderQuantumFrames];
+  float total_gain[AudioUtilities::kRenderQuantumFrames];
+
+  for (unsigned k = 0; k < frames_to_process; ++k) {
+    FloatPoint3D panner_position(panner_x[k], panner_y[k], panner_z[k]);
+    FloatPoint3D orientation(orientation_x[k], orientation_y[k],
+                             orientation_z[k]);
+    FloatPoint3D listener_position(listener_x[k], listener_y[k], listener_z[k]);
+    FloatPoint3D listener_forward(forward_x[k], forward_y[k], forward_z[k]);
+    FloatPoint3D listener_up(up_x[k], up_y[k], up_z[k]);
+
+    CalculateAzimuthElevation(&azimuth[k], &elevation[k], panner_position,
+                              listener_position, listener_forward, listener_up);
+
+    // Get distance and cone gain
+    total_gain[k] = CalculateDistanceConeGain(panner_position, orientation,
+                                              listener_position);
+  }
+
+  // Pan first, then apply the per-frame distance/cone gains in place.
+  panner_->PanWithSampleAccurateValues(azimuth, elevation, source, destination,
+                                       frames_to_process,
+                                       InternalChannelInterpretation());
+  destination->CopyWithSampleAccurateGainValuesFrom(*destination, total_gain,
+                                                    frames_to_process);
+}
+
+// Evaluates all six AudioParam timelines for one quantum into a shared
+// scratch buffer; every call overwrites |values| and the results are
+// discarded.  NOTE(review): presumably this exists so param automation still
+// advances when the node's audio isn't processed — confirm with callers.
+void PannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
+  float values[AudioUtilities::kRenderQuantumFrames];
+
+  DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+  position_x_->CalculateSampleAccurateValues(values, frames_to_process);
+  position_y_->CalculateSampleAccurateValues(values, frames_to_process);
+  position_z_->CalculateSampleAccurateValues(values, frames_to_process);
+
+  orientation_x_->CalculateSampleAccurateValues(values, frames_to_process);
+  orientation_y_->CalculateSampleAccurateValues(values, frames_to_process);
+  orientation_z_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+// Creates the Panner implementation for the current model and registers this
+// handler with the AudioListener.  Idempotent.
+void PannerHandler::Initialize() {
+  if (IsInitialized())
+    return;
+
+  panner_ = Panner::Create(panning_model_, Context()->sampleRate(),
+                           Listener()->HrtfDatabaseLoader());
+  Listener()->AddPanner(*this);
+
+  // Set the cached values to the current values to start things off. The
+  // panner is already marked as dirty, so this won't matter.
+  last_position_ = GetPosition();
+  last_orientation_ = Orientation();
+
+  AudioHandler::Initialize();
+}
+
+// Releases the Panner and deregisters from the listener.  Idempotent;
+// mirrors Initialize().
+void PannerHandler::Uninitialize() {
+  if (!IsInitialized())
+    return;
+
+  panner_.reset();
+  Listener()->RemovePanner(*this);
+
+  AudioHandler::Uninitialize();
+}
+
+// Accessor for the context's AudioListener captured at construction.
+AudioListener* PannerHandler::Listener() {
+  return listener_;
+}
+
+// Maps the internal panning-model enum back to its IDL string.
+String PannerHandler::PanningModel() const {
+  switch (panning_model_) {
+    case Panner::kPanningModelEqualPower:
+      return "equalpower";
+    case Panner::kPanningModelHRTF:
+      return "HRTF";
+    default:
+      NOTREACHED();
+      return "equalpower";
+  }
+}
+
+void PannerHandler::SetPanningModel(const String& model) {
+  // WebIDL should guarantee that we are never called with an invalid string
+  // for the model.
+  if (model == "equalpower")
+    SetPanningModel(Panner::kPanningModelEqualPower);
+  else if (model == "HRTF")
+    SetPanningModel(Panner::kPanningModelHRTF);
+  else
+    NOTREACHED();
+}
+
+// This method should only be called from setPanningModel(const String&)!
+// Note the histogram is counted on every call, even when the model does not
+// actually change.
+bool PannerHandler::SetPanningModel(unsigned model) {
+  DEFINE_STATIC_LOCAL(EnumerationHistogram, panning_model_histogram,
+                      ("WebAudio.PannerNode.PanningModel", 2));
+  panning_model_histogram.Count(model);
+
+  if (model == Panner::kPanningModelHRTF) {
+    // Load the HRTF database asynchronously so we don't block the
+    // Javascript thread while creating the HRTF database. It's ok to call
+    // this multiple times; we won't be constantly loading the database over
+    // and over.
+    Listener()->CreateAndLoadHRTFDatabaseLoader(Context()->sampleRate());
+  }
+
+  if (!panner_.get() || model != panning_model_) {
+    // This synchronizes with process().
+    MutexLocker process_locker(process_lock_);
+    panner_ = Panner::Create(model, Context()->sampleRate(),
+                             Listener()->HrtfDatabaseLoader());
+    panning_model_ = model;
+  }
+  return true;
+}
+
+// Maps the DistanceEffect model back to its IDL string.  The const_cast is a
+// read-only workaround — NOTE(review): presumably DistanceEffect::Model()
+// isn't const-qualified; confirm in distance_effect.h.
+String PannerHandler::DistanceModel() const {
+  switch (const_cast<PannerHandler*>(this)->distance_effect_.Model()) {
+    case DistanceEffect::kModelLinear:
+      return "linear";
+    case DistanceEffect::kModelInverse:
+      return "inverse";
+    case DistanceEffect::kModelExponential:
+      return "exponential";
+    default:
+      NOTREACHED();
+      return "inverse";
+  }
+}
+
+// String-to-enum adapter for the IDL attribute setter; unknown strings are
+// silently ignored (WebIDL enum validation happens before this point).
+void PannerHandler::SetDistanceModel(const String& model) {
+  if (model == "linear")
+    SetDistanceModel(DistanceEffect::kModelLinear);
+  else if (model == "inverse")
+    SetDistanceModel(DistanceEffect::kModelInverse);
+  else if (model == "exponential")
+    SetDistanceModel(DistanceEffect::kModelExponential);
+}
+
+// Applies a validated model value under the process lock; no-op (but still
+// returns true) when the model is unchanged.
+bool PannerHandler::SetDistanceModel(unsigned model) {
+  switch (model) {
+    case DistanceEffect::kModelLinear:
+    case DistanceEffect::kModelInverse:
+    case DistanceEffect::kModelExponential:
+      if (model != distance_model_) {
+        // This synchronizes with process().
+        MutexLocker process_locker(process_lock_);
+        distance_effect_.SetModel(
+            static_cast<DistanceEffect::ModelType>(model));
+        distance_model_ = model;
+      }
+      break;
+    default:
+      NOTREACHED();
+      return false;
+  }
+
+  return true;
+}
+
+// The six setters below share one pattern: return early when the value is
+// unchanged (avoiding the lock), otherwise update the effect under the
+// process lock and mark the cached distance/cone gain dirty.
+void PannerHandler::SetRefDistance(double distance) {
+  if (RefDistance() == distance)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  distance_effect_.SetRefDistance(distance);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+void PannerHandler::SetMaxDistance(double distance) {
+  if (MaxDistance() == distance)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  distance_effect_.SetMaxDistance(distance);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+void PannerHandler::SetRolloffFactor(double factor) {
+  if (RolloffFactor() == factor)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  distance_effect_.SetRolloffFactor(factor);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+void PannerHandler::SetConeInnerAngle(double angle) {
+  if (ConeInnerAngle() == angle)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  cone_effect_.SetInnerAngle(angle);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+void PannerHandler::SetConeOuterAngle(double angle) {
+  if (ConeOuterAngle() == angle)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  cone_effect_.SetOuterAngle(angle);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+// Note: the parameter is a gain value despite being named |angle|.
+void PannerHandler::SetConeOuterGain(double angle) {
+  if (ConeOuterGain() == angle)
+    return;
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+  cone_effect_.SetOuterGain(angle);
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+// Legacy setPosition(): schedules SetValueAtTime(now) on all three position
+// params.  Position affects both the azimuth/elevation and the
+// distance/cone gain, so both dirty bits are set.
+void PannerHandler::SetPosition(float x,
+                                float y,
+                                float z,
+                                ExceptionState& exceptionState) {
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+
+  double now = Context()->currentTime();
+
+  position_x_->Timeline().SetValueAtTime(x, now, exceptionState);
+  position_y_->Timeline().SetValueAtTime(y, now, exceptionState);
+  position_z_->Timeline().SetValueAtTime(z, now, exceptionState);
+
+  MarkPannerAsDirty(PannerHandler::kAzimuthElevationDirty |
+                    PannerHandler::kDistanceConeGainDirty);
+}
+
+// Legacy setOrientation().  Unlike SetPosition(), only the cone gain depends
+// on the source's orientation, so only that dirty bit is set.
+void PannerHandler::SetOrientation(float x,
+                                   float y,
+                                   float z,
+                                   ExceptionState& exceptionState) {
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+
+  double now = Context()->currentTime();
+
+  orientation_x_->Timeline().SetValueAtTime(x, now, exceptionState);
+  orientation_y_->Timeline().SetValueAtTime(y, now, exceptionState);
+  orientation_z_->Timeline().SetValueAtTime(z, now, exceptionState);
+
+  MarkPannerAsDirty(PannerHandler::kDistanceConeGainDirty);
+}
+
+void PannerHandler::CalculateAzimuthElevation(
+ double* out_azimuth,
+ double* out_elevation,
+ const FloatPoint3D& position,
+ const FloatPoint3D& listener_position,
+ const FloatPoint3D& listener_forward,
+ const FloatPoint3D& listener_up) {
+ // Calculate the source-listener vector
+ FloatPoint3D source_listener = position - listener_position;
+
+ // Quick default return if the source and listener are at the same position.
+ if (source_listener.IsZero()) {
+ *out_azimuth = 0;
+ *out_elevation = 0;
+ return;
+ }
+
+ // normalize() does nothing if the length of |sourceListener| is zero.
+ source_listener.Normalize();
+
+ // Align axes
+ FloatPoint3D listener_right = listener_forward.Cross(listener_up);
+ listener_right.Normalize();
+
+ FloatPoint3D listener_forward_norm = listener_forward;
+ listener_forward_norm.Normalize();
+
+ FloatPoint3D up = listener_right.Cross(listener_forward_norm);
+
+ float up_projection = source_listener.Dot(up);
+
+ FloatPoint3D projected_source = source_listener - up_projection * up;
+
+ double azimuth = rad2deg(projected_source.AngleBetween(listener_right));
+ FixNANs(azimuth); // avoid illegal values
+
+ // Source in front or behind the listener
+ double front_back = projected_source.Dot(listener_forward_norm);
+ if (front_back < 0.0)
+ azimuth = 360.0 - azimuth;
+
+ // Make azimuth relative to "front" and not "right" listener vector
+ if ((azimuth >= 0.0) && (azimuth <= 270.0))
+ azimuth = 90.0 - azimuth;
+ else
+ azimuth = 450.0 - azimuth;
+
+ // Elevation
+ double elevation = 90 - rad2deg(source_listener.AngleBetween(up));
+ FixNANs(elevation); // avoid illegal values
+
+ if (elevation > 90.0)
+ elevation = 180.0 - elevation;
+ else if (elevation < -90.0)
+ elevation = -180.0 - elevation;
+
+ if (out_azimuth)
+ *out_azimuth = azimuth;
+ if (out_elevation)
+ *out_elevation = elevation;
+}
+
+float PannerHandler::CalculateDistanceConeGain(
+ const FloatPoint3D& position,
+ const FloatPoint3D& orientation,
+ const FloatPoint3D& listener_position) {
+ double listener_distance = position.DistanceTo(listener_position);
+ double distance_gain = distance_effect_.Gain(listener_distance);
+ double cone_gain =
+ cone_effect_.Gain(position, orientation, listener_position);
+
+ return float(distance_gain * cone_gain);
+}
+
// Returns the (possibly cached) azimuth/elevation pair, recomputing it first
// when either this panner or the listener is marked dirty.  Audio thread
// only (see DCHECK).
// NOTE(review): |out_azimuth| and |out_elevation| are dereferenced
// unconditionally here — callers must pass non-null pointers.
void PannerHandler::AzimuthElevation(double* out_azimuth,
                                     double* out_elevation) {
  DCHECK(Context()->IsAudioThread());

  // Calculate new azimuth and elevation if the panner or the listener changed
  // position or orientation in any way.
  if (IsAzimuthElevationDirty() || Listener()->IsListenerDirty()) {
    CalculateAzimuthElevation(&cached_azimuth_, &cached_elevation_,
                              GetPosition(), Listener()->GetPosition(),
                              Listener()->Orientation(),
                              Listener()->UpVector());
    // Cache is fresh again; setters will re-mark it dirty on change.
    is_azimuth_elevation_dirty_ = false;
  }

  *out_azimuth = cached_azimuth_;
  *out_elevation = cached_elevation_;
}
+
// Returns the (possibly cached) combined distance/cone gain, recomputing it
// first when either this panner or the listener is marked dirty.  Audio
// thread only (see DCHECK).
float PannerHandler::DistanceConeGain() {
  DCHECK(Context()->IsAudioThread());

  // Calculate new distance and cone gain if the panner or the listener
  // changed position or orientation in any way.
  if (IsDistanceConeGainDirty() || Listener()->IsListenerDirty()) {
    cached_distance_cone_gain_ = CalculateDistanceConeGain(
        GetPosition(), Orientation(), Listener()->GetPosition());
    // Cache is fresh again; setters will re-mark it dirty on change.
    is_distance_cone_gain_dirty_ = false;
  }

  return cached_distance_cone_gain_;
}
+
// Latches the requested dirty bits; the audio thread clears each bit after
// recomputing the corresponding cached value (see AzimuthElevation() and
// DistanceConeGain()).
// NOTE(review): these flags are written here (typically from the main thread,
// under process_lock_ in the setters) and read/cleared on the audio thread —
// confirm the locking discipline at all call sites before restructuring.
void PannerHandler::MarkPannerAsDirty(unsigned dirty) {
  if (dirty & PannerHandler::kAzimuthElevationDirty)
    is_azimuth_elevation_dirty_ = true;

  if (dirty & PannerHandler::kDistanceConeGainDirty)
    is_distance_cone_gain_dirty_ = true;
}
+
+void PannerHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // A PannerNode only supports 1 or 2 channels
+ if (channel_count > 0 && channel_count <= 2) {
+ if (channel_count_ != channel_count) {
+ channel_count_ = channel_count;
+ if (InternalChannelCountMode() != kMax)
+ UpdateChannelsForInputs();
+ }
+ } else {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned long>(
+ "channelCount", channel_count, 1,
+ ExceptionMessages::kInclusiveBound, 2,
+ ExceptionMessages::kInclusiveBound));
+ }
+}
+
+void PannerHandler::SetChannelCountMode(const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ ChannelCountMode old_mode = InternalChannelCountMode();
+
+ if (mode == "clamped-max") {
+ new_channel_count_mode_ = kClampedMax;
+ } else if (mode == "explicit") {
+ new_channel_count_mode_ = kExplicit;
+ } else if (mode == "max") {
+ // This is not supported for a PannerNode, which can only handle 1 or 2
+ // channels.
+ exception_state.ThrowDOMException(kNotSupportedError,
+ "Panner: 'max' is not allowed");
+ new_channel_count_mode_ = old_mode;
+ } else {
+ // Do nothing for other invalid values.
+ new_channel_count_mode_ = old_mode;
+ }
+
+ if (new_channel_count_mode_ != old_mode)
+ Context()->GetDeferredTaskHandler().AddChangedChannelCountMode(this);
+}
+
+bool PannerHandler::HasSampleAccurateValues() const {
+ return position_x_->HasSampleAccurateValues() ||
+ position_y_->HasSampleAccurateValues() ||
+ position_z_->HasSampleAccurateValues() ||
+ orientation_x_->HasSampleAccurateValues() ||
+ orientation_y_->HasSampleAccurateValues() ||
+ orientation_z_->HasSampleAccurateValues();
+}
+
+void PannerHandler::UpdateDirtyState() {
+ DCHECK(Context()->IsAudioThread());
+
+ FloatPoint3D current_position = GetPosition();
+ FloatPoint3D current_orientation = Orientation();
+
+ bool has_moved = current_position != last_position_ ||
+ current_orientation != last_orientation_;
+
+ if (has_moved) {
+ last_position_ = current_position;
+ last_orientation_ = current_orientation;
+
+ MarkPannerAsDirty(PannerHandler::kAzimuthElevationDirty |
+ PannerHandler::kDistanceConeGainDirty);
+ }
+}
+
+bool PannerHandler::RequiresTailProcessing() const {
+ // If there's no internal panner method set up yet, assume we require tail
+ // processing in case the HRTF panner is set later, which does require tail
+ // processing.
+ return panner_ ? panner_->RequiresTailProcessing() : true;
+}
+
+// ----------------------------------------------------------------
+
// Constructs a PannerNode with spec-default AudioParams (position at the
// origin, orientation along +x) and installs a PannerHandler wired to the
// params' handlers.
PannerNode::PannerNode(BaseAudioContext& context)
    : AudioNode(context),
      position_x_(AudioParam::Create(context,
                                     kParamTypePannerPositionX,
                                     "Panner.positionX",
                                     0.0)),
      position_y_(AudioParam::Create(context,
                                     kParamTypePannerPositionY,
                                     "Panner.positionY",
                                     0.0)),
      position_z_(AudioParam::Create(context,
                                     kParamTypePannerPositionZ,
                                     "Panner.positionZ",
                                     0.0)),
      orientation_x_(AudioParam::Create(context,
                                        kParamTypePannerOrientationX,
                                        "Panner.orientationX",
                                        1.0)),
      orientation_y_(AudioParam::Create(context,
                                        kParamTypePannerOrientationY,
                                        "Panner.orientationY",
                                        0.0)),
      orientation_z_(AudioParam::Create(context,
                                        kParamTypePannerOrientationZ,
                                        "Panner.orientationZ",
                                        0.0)) {
  // The handler owns refs to the param handlers so the audio thread can read
  // them without touching the garbage-collected AudioParam wrappers.
  SetHandler(PannerHandler::Create(
      *this, context.sampleRate(), position_x_->Handler(),
      position_y_->Handler(), position_z_->Handler(), orientation_x_->Handler(),
      orientation_y_->Handler(), orientation_z_->Handler()));
}
+
// Factory for `new PannerNode(context)` and context.createPanner().
// Returns nullptr (with |exception_state| set) if the context is closed.
PannerNode* PannerNode::Create(BaseAudioContext& context,
                               ExceptionState& exception_state) {
  DCHECK(IsMainThread());

  if (context.IsContextClosed()) {
    context.ThrowExceptionForClosedState(exception_state);
    return nullptr;
  }

  return new PannerNode(context);
}
+
// Factory for the dictionary constructor: creates the node, then applies
// every PannerOptions member through the regular attribute setters.
// NOTE(review): the setters below can report errors through
// |exception_state| (e.g. negative refDistance), yet construction keeps
// applying subsequent options instead of returning early — confirm this
// matches the intended bindings behavior.
PannerNode* PannerNode::Create(BaseAudioContext* context,
                               const PannerOptions& options,
                               ExceptionState& exception_state) {
  PannerNode* node = Create(*context, exception_state);

  if (!node)
    return nullptr;

  // Shared AudioNode options (channelCount, channelCountMode, etc.).
  node->HandleChannelOptions(options, exception_state);

  node->setPanningModel(options.panningModel());
  node->setDistanceModel(options.distanceModel());

  node->positionX()->setValue(options.positionX());
  node->positionY()->setValue(options.positionY());
  node->positionZ()->setValue(options.positionZ());

  node->orientationX()->setValue(options.orientationX());
  node->orientationY()->setValue(options.orientationY());
  node->orientationZ()->setValue(options.orientationZ());

  node->setRefDistance(options.refDistance(), exception_state);
  node->setMaxDistance(options.maxDistance(), exception_state);
  node->setRolloffFactor(options.rolloffFactor());
  node->setConeInnerAngle(options.coneInnerAngle());
  node->setConeOuterAngle(options.coneOuterAngle());
  node->setConeOuterGain(options.coneOuterGain());

  return node;
}
+
// The handler installed in the constructor is always a PannerHandler, so the
// downcast is safe.
PannerHandler& PannerNode::GetPannerHandler() const {
  return static_cast<PannerHandler&>(Handler());
}
+
// panningModel attribute — forwards to the handler.
String PannerNode::panningModel() const {
  return GetPannerHandler().PanningModel();
}

void PannerNode::setPanningModel(const String& model) {
  GetPannerHandler().SetPanningModel(model);
}
+
+void PannerNode::setPosition(float x,
+ float y,
+ float z,
+ ExceptionState& exceptionState) {
+ GetPannerHandler().SetPosition(x, y, z, exceptionState);
+}
+
+void PannerNode::setOrientation(float x,
+ float y,
+ float z,
+ ExceptionState& exceptionState) {
+ GetPannerHandler().SetOrientation(x, y, z, exceptionState);
+}
+
// distanceModel attribute — forwards to the handler.
String PannerNode::distanceModel() const {
  return GetPannerHandler().DistanceModel();
}

void PannerNode::setDistanceModel(const String& model) {
  GetPannerHandler().SetDistanceModel(model);
}

// refDistance attribute getter.
double PannerNode::refDistance() const {
  return GetPannerHandler().RefDistance();
}
+
// refDistance attribute setter; rejects negative values.
// NOTE(review): kV8RangeError is passed where ThrowDOMException takes an
// exception code — confirm this maps to a JS RangeError as the IDL's
// [RaisesException=Setter] expects.
void PannerNode::setRefDistance(double distance,
                                ExceptionState& exception_state) {
  if (distance < 0) {
    exception_state.ThrowDOMException(
        kV8RangeError, ExceptionMessages::IndexExceedsMinimumBound<double>(
                           "refDistance", distance, 0));
    return;
  }

  GetPannerHandler().SetRefDistance(distance);
}
+
// maxDistance attribute getter.
double PannerNode::maxDistance() const {
  return GetPannerHandler().MaxDistance();
}

// maxDistance attribute setter; rejects values <= 0 (note: strictly
// positive, unlike refDistance which allows 0).
void PannerNode::setMaxDistance(double distance,
                                ExceptionState& exception_state) {
  if (distance <= 0) {
    exception_state.ThrowDOMException(
        kV8RangeError, ExceptionMessages::IndexExceedsMinimumBound<double>(
                           "maxDistance", distance, 0));
    return;
  }

  GetPannerHandler().SetMaxDistance(distance);
}
+
// rolloffFactor / cone attributes — thin forwarders to the handler.
double PannerNode::rolloffFactor() const {
  return GetPannerHandler().RolloffFactor();
}

void PannerNode::setRolloffFactor(double factor) {
  GetPannerHandler().SetRolloffFactor(factor);
}

double PannerNode::coneInnerAngle() const {
  return GetPannerHandler().ConeInnerAngle();
}

void PannerNode::setConeInnerAngle(double angle) {
  GetPannerHandler().SetConeInnerAngle(angle);
}

double PannerNode::coneOuterAngle() const {
  return GetPannerHandler().ConeOuterAngle();
}

void PannerNode::setConeOuterAngle(double angle) {
  GetPannerHandler().SetConeOuterAngle(angle);
}

double PannerNode::coneOuterGain() const {
  return GetPannerHandler().ConeOuterGain();
}

void PannerNode::setConeOuterGain(double gain) {
  GetPannerHandler().SetConeOuterGain(gain);
}
+
// Garbage-collection tracing: visit the six owned AudioParam members, then
// delegate to the base class.
void PannerNode::Trace(blink::Visitor* visitor) {
  visitor->Trace(position_x_);
  visitor->Trace(position_y_);
  visitor->Trace(position_z_);

  visitor->Trace(orientation_x_);
  visitor->Trace(orientation_y_);
  visitor->Trace(orientation_z_);

  AudioNode::Trace(visitor);
}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h
new file mode 100644
index 00000000000..61eb02a12ff
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PANNER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PANNER_NODE_H_
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/audio_listener.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/cone.h"
+#include "third_party/blink/renderer/platform/audio/distance_effect.h"
+#include "third_party/blink/renderer/platform/audio/panner.h"
+#include "third_party/blink/renderer/platform/geometry/float_point_3d.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class PannerOptions;
+
+// PannerNode is an AudioNode with one input and one output.
+// It positions a sound in 3D space, with the exact effect dependent on the
+// panning model. It has a position and an orientation in 3D space which is
+// relative to the position and orientation of the context's AudioListener. A
+// distance effect will attenuate the gain as the position moves away from the
+// listener. A cone effect will attenuate the gain as the orientation moves
+// away from the listener. All of these effects follow the OpenAL specification
+// very closely.
+
// Audio-thread side of PannerNode.  Holds the panning/distance/cone state,
// caches derived values (azimuth/elevation, distance-cone gain), and
// recomputes them lazily when marked dirty.
class PannerHandler final : public AudioHandler {
 public:
  // These enums are used to distinguish what cached values of panner are dirty.
  enum {
    kAzimuthElevationDirty = 0x1,
    kDistanceConeGainDirty = 0x2,
  };

  static scoped_refptr<PannerHandler> Create(AudioNode&,
                                             float sample_rate,
                                             AudioParamHandler& position_x,
                                             AudioParamHandler& position_y,
                                             AudioParamHandler& position_z,
                                             AudioParamHandler& orientation_x,
                                             AudioParamHandler& orientation_y,
                                             AudioParamHandler& orientation_z);

  ~PannerHandler() override;

  // AudioHandler
  void Process(size_t frames_to_process) override;
  // Renders with per-frame (a-rate) position/orientation values; used when
  // HasSampleAccurateValues() is true.
  void ProcessSampleAccurateValues(AudioBus* destination,
                                   const AudioBus* source,
                                   size_t frames_to_process);
  void ProcessOnlyAudioParams(size_t frames_to_process) override;
  void Initialize() override;
  void Uninitialize() override;

  // Panning model
  String PanningModel() const;
  void SetPanningModel(const String&);

  // Position and orientation
  void SetPosition(float x, float y, float z, ExceptionState&);
  void SetOrientation(float x, float y, float z, ExceptionState&);

  // Distance parameters
  String DistanceModel() const;
  void SetDistanceModel(const String&);

  double RefDistance() { return distance_effect_.RefDistance(); }
  void SetRefDistance(double);

  double MaxDistance() { return distance_effect_.MaxDistance(); }
  void SetMaxDistance(double);

  double RolloffFactor() { return distance_effect_.RolloffFactor(); }
  void SetRolloffFactor(double);

  // Sound cones - angles in degrees
  double ConeInnerAngle() const { return cone_effect_.InnerAngle(); }
  void SetConeInnerAngle(double);

  double ConeOuterAngle() const { return cone_effect_.OuterAngle(); }
  void SetConeOuterAngle(double);

  double ConeOuterGain() const { return cone_effect_.OuterGain(); }
  void SetConeOuterGain(double);

  // Invalidates the cached azimuth/elevation and/or distance-cone gain;
  // |dirty| is a bitmask of the enum values above.
  void MarkPannerAsDirty(unsigned);

  // Tail/latency are delegated to the active panner implementation; zero
  // until one is set up.
  double TailTime() const override { return panner_ ? panner_->TailTime() : 0; }
  double LatencyTime() const override {
    return panner_ ? panner_->LatencyTime() : 0;
  }
  bool RequiresTailProcessing() const final;

  void SetChannelCount(unsigned long, ExceptionState&) final;
  void SetChannelCountMode(const String&, ExceptionState&) final;

 private:
  PannerHandler(AudioNode&,
                float sample_rate,
                AudioParamHandler& position_x,
                AudioParamHandler& position_y,
                AudioParamHandler& position_z,
                AudioParamHandler& orientation_x,
                AudioParamHandler& orientation_y,
                AudioParamHandler& orientation_z);

  // BaseAudioContext's listener
  AudioListener* Listener();

  bool SetPanningModel(unsigned);   // Returns true on success.
  bool SetDistanceModel(unsigned);  // Returns true on success.

  void CalculateAzimuthElevation(double* out_azimuth,
                                 double* out_elevation,
                                 const FloatPoint3D& position,
                                 const FloatPoint3D& listener_position,
                                 const FloatPoint3D& listener_forward,
                                 const FloatPoint3D& listener_up);

  // Returns the combined distance and cone gain attenuation.
  float CalculateDistanceConeGain(const FloatPoint3D& position,
                                  const FloatPoint3D& orientation,
                                  const FloatPoint3D& listener_position);

  // Cached accessors; recompute when the corresponding dirty bit is set.
  void AzimuthElevation(double* out_azimuth, double* out_elevation);
  float DistanceConeGain();

  bool IsAzimuthElevationDirty() const { return is_azimuth_elevation_dirty_; }
  bool IsDistanceConeGainDirty() const { return is_distance_cone_gain_dirty_; }
  void UpdateDirtyState();

  // This Persistent doesn't make a reference cycle including the owner
  // PannerNode. It is accessed by both audio and main thread.
  CrossThreadPersistent<AudioListener> listener_;
  std::unique_ptr<Panner> panner_;
  unsigned panning_model_;
  unsigned distance_model_;

  bool is_azimuth_elevation_dirty_;
  bool is_distance_cone_gain_dirty_;

  // Gain
  DistanceEffect distance_effect_;
  ConeEffect cone_effect_;

  // Cached values; valid only while the matching dirty flag is false.
  double cached_azimuth_;
  double cached_elevation_;
  float cached_distance_cone_gain_;

  // Current source position assembled from the three position params.
  const FloatPoint3D GetPosition() const {
    return FloatPoint3D(position_x_->Value(), position_y_->Value(),
                        position_z_->Value());
  }

  // Current source orientation assembled from the three orientation params.
  const FloatPoint3D Orientation() const {
    return FloatPoint3D(orientation_x_->Value(), orientation_y_->Value(),
                        orientation_z_->Value());
  }

  // True if any of this panner's AudioParams have automations.
  bool HasSampleAccurateValues() const;

  scoped_refptr<AudioParamHandler> position_x_;
  scoped_refptr<AudioParamHandler> position_y_;
  scoped_refptr<AudioParamHandler> position_z_;

  scoped_refptr<AudioParamHandler> orientation_x_;
  scoped_refptr<AudioParamHandler> orientation_y_;
  scoped_refptr<AudioParamHandler> orientation_z_;

  FloatPoint3D last_position_;
  FloatPoint3D last_orientation_;

  // Synchronize process() with setting of the panning model, source's location
  // information, listener, distance parameters and sound cones.
  mutable Mutex process_lock_;
};
+
+class PannerNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static PannerNode* Create(BaseAudioContext&, ExceptionState&);
+ static PannerNode* Create(BaseAudioContext*,
+ const PannerOptions&,
+ ExceptionState&);
+ PannerHandler& GetPannerHandler() const;
+
+ virtual void Trace(blink::Visitor*);
+
+ // Uses a 3D cartesian coordinate system
+ AudioParam* positionX() const { return position_x_; };
+ AudioParam* positionY() const { return position_y_; };
+ AudioParam* positionZ() const { return position_z_; };
+
+ AudioParam* orientationX() const { return orientation_x_; };
+ AudioParam* orientationY() const { return orientation_y_; };
+ AudioParam* orientationZ() const { return orientation_z_; };
+
+ String panningModel() const;
+ void setPanningModel(const String&);
+ void setPosition(float x, float y, float z, ExceptionState&);
+ void setOrientation(float x, float y, float z, ExceptionState&);
+ String distanceModel() const;
+ void setDistanceModel(const String&);
+ double refDistance() const;
+ void setRefDistance(double, ExceptionState&);
+ double maxDistance() const;
+ void setMaxDistance(double, ExceptionState&);
+ double rolloffFactor() const;
+ void setRolloffFactor(double);
+ double coneInnerAngle() const;
+ void setConeInnerAngle(double);
+ double coneOuterAngle() const;
+ void setConeOuterAngle(double);
+ double coneOuterGain() const;
+ void setConeOuterGain(double);
+
+ private:
+ PannerNode(BaseAudioContext&);
+
+ Member<AudioParam> position_x_;
+ Member<AudioParam> position_y_;
+ Member<AudioParam> position_z_;
+
+ Member<AudioParam> orientation_x_;
+ Member<AudioParam> orientation_y_;
+ Member<AudioParam> orientation_z_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PANNER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.idl
new file mode 100644
index 00000000000..3acde7e5519
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_node.idl
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#pannernode
+enum PanningModelType {
+ "equalpower",
+ "HRTF"
+};
+
+enum DistanceModelType {
+ "linear",
+ "inverse",
+ "exponential"
+};
+
[
    Constructor(BaseAudioContext context, optional PannerOptions options),
    RaisesException=Constructor,
    Measure
] interface PannerNode : AudioNode {
    // Default model for stereo is equalpower.
    attribute PanningModelType panningModel;

    // Deprecated scalar setters, kept for web compatibility; the AudioParam
    // attributes below are the preferred interface.
    [RaisesException, MeasureAs=PannerNodeSetPosition] void setPosition(float x, float y, float z);
    [RaisesException, MeasureAs=PannerNodeSetOrientation] void setOrientation(float x, float y, float z);

    // Uses a 3D cartesian coordinate system
    readonly attribute AudioParam positionX;
    readonly attribute AudioParam positionY;
    readonly attribute AudioParam positionZ;

    readonly attribute AudioParam orientationX;
    readonly attribute AudioParam orientationY;
    readonly attribute AudioParam orientationZ;

    // Distance model
    attribute DistanceModelType distanceModel;

    // Setters throw RangeError for out-of-range values (see panner_node.cc).
    [RaisesException=Setter] attribute double refDistance;
    [RaisesException=Setter] attribute double maxDistance;
    attribute double rolloffFactor;

    // Directional sound cone
    attribute double coneInnerAngle;
    attribute double coneOuterAngle;
    attribute double coneOuterGain;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/panner_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/panner_options.idl
new file mode 100644
index 00000000000..a119529e9e4
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/panner_options.idl
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-panneroptions
+dictionary PannerOptions : AudioNodeOptions {
+ PanningModelType panningModel = "equalpower";
+ DistanceModelType distanceModel = "inverse";
+
+ float positionX = 0;
+ float positionY = 0;
+ float positionZ = 0;
+ float orientationX = 1;
+ float orientationY = 0;
+ float orientationZ = 0;
+
+ double refDistance = 1;
+ double maxDistance= 10000;
+ double rolloffFactor = 1;
+ double coneInnerAngle = 360;
+ double coneOuterAngle = 360;
+ double coneOuterGain = 0;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc
new file mode 100644
index 00000000000..675bf79980c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <algorithm>
+#include <memory>
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/oscillator_node.h"
+#include "third_party/blink/renderer/modules/webaudio/periodic_wave.h"
+#include "third_party/blink/renderer/modules/webaudio/periodic_wave_options.h"
+#include "third_party/blink/renderer/platform/audio/fft_frame.h"
+#include "third_party/blink/renderer/platform/audio/vector_math.h"
+
+namespace blink {
+
// The number of bands per octave. Each octave will have this many entries in
// the wave tables.
const unsigned kNumberOfOctaveBands = 3;

// The max length of a periodic wave. This must be a power of two greater than
// or equal to 2048 and must be supported by the FFT routines.
const unsigned kMaxPeriodicWaveSize = 16384;

// Width of one wave-table range in cents (1200 cents = one octave).
const float kCentsPerRange = 1200 / kNumberOfOctaveBands;

using namespace VectorMath;
+
+PeriodicWave* PeriodicWave::Create(BaseAudioContext& context,
+ const Vector<float>& real,
+ const Vector<float>& imag,
+ bool disable_normalization,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ if (real.size() != imag.size()) {
+ exception_state.ThrowDOMException(
+ kIndexSizeError, "length of real array (" +
+ String::Number(real.size()) +
+ ") and length of imaginary array (" +
+ String::Number(imag.size()) + ") must match.");
+ return nullptr;
+ }
+
+ PeriodicWave* periodic_wave = new PeriodicWave(context.sampleRate());
+ periodic_wave->CreateBandLimitedTables(real.data(), imag.data(), real.size(),
+ disable_normalization);
+ return periodic_wave;
+}
+
+PeriodicWave* PeriodicWave::Create(BaseAudioContext* context,
+ const PeriodicWaveOptions& options,
+ ExceptionState& exception_state) {
+ bool normalize = options.disableNormalization();
+
+ Vector<float> real_coef;
+ Vector<float> imag_coef;
+
+ if (options.hasReal()) {
+ real_coef = options.real();
+ if (options.hasImag())
+ imag_coef = options.imag();
+ else
+ imag_coef.resize(real_coef.size());
+ } else if (options.hasImag()) {
+ // |real| not given, but we have |imag|.
+ imag_coef = options.imag();
+ real_coef.resize(imag_coef.size());
+ } else {
+ // Neither |real| nor |imag| given. Return an object that would
+ // generate a sine wave, which means real = [0,0], and imag = [0, 1]
+ real_coef.resize(2);
+ imag_coef.resize(2);
+ imag_coef[1] = 1;
+ }
+
+ return Create(*context, real_coef, imag_coef, normalize, exception_state);
+}
+
+PeriodicWave* PeriodicWave::CreateSine(float sample_rate) {
+ PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ periodic_wave->GenerateBasicWaveform(OscillatorHandler::SINE);
+ return periodic_wave;
+}
+
+PeriodicWave* PeriodicWave::CreateSquare(float sample_rate) {
+ PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ periodic_wave->GenerateBasicWaveform(OscillatorHandler::SQUARE);
+ return periodic_wave;
+}
+
+PeriodicWave* PeriodicWave::CreateSawtooth(float sample_rate) {
+ PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ periodic_wave->GenerateBasicWaveform(OscillatorHandler::SAWTOOTH);
+ return periodic_wave;
+}
+
+PeriodicWave* PeriodicWave::CreateTriangle(float sample_rate) {
+ PeriodicWave* periodic_wave = new PeriodicWave(sample_rate);
+ periodic_wave->GenerateBasicWaveform(OscillatorHandler::TRIANGLE);
+ return periodic_wave;
+}
+
PeriodicWave::PeriodicWave(float sample_rate)
    : v8_external_memory_(0),
      sample_rate_(sample_rate),
      cents_per_range_(kCentsPerRange) {
  float nyquist = 0.5 * sample_rate_;
  // Playing at or above this fundamental keeps every partial below Nyquist.
  lowest_fundamental_frequency_ = nyquist / MaxNumberOfPartials();
  // Scalar applied to the oscillator frequency to get the table phase
  // increment per sample.
  rate_scale_ = PeriodicWaveSize() / sample_rate_;
  // Compute the number of ranges needed to cover the entire frequency range,
  // assuming kNumberOfOctaveBands per octave. The added 0.5 rounds the float
  // result to the nearest integer when converted to unsigned.
  number_of_ranges_ = 0.5 + kNumberOfOctaveBands * log2f(PeriodicWaveSize());
}
+
PeriodicWave::~PeriodicWave() {
  // Undo all external-memory accounting so V8's GC-pressure estimate does not
  // drift after this object is destroyed.
  AdjustV8ExternalMemory(-static_cast<int64_t>(v8_external_memory_));
}
+
+unsigned PeriodicWave::PeriodicWaveSize() const {
+ // Choose an appropriate wave size for the given sample rate. This allows us
+ // to use shorter FFTs when possible to limit the complexity. The breakpoints
+ // here are somewhat arbitrary, but we want sample rates around 44.1 kHz or so
+ // to have a size of 4096 to preserve backward compatibility.
+ if (sample_rate_ <= 24000) {
+ return 2048;
+ }
+
+ if (sample_rate_ <= 88200) {
+ return 4096;
+ }
+
+ return kMaxPeriodicWaveSize;
+}
+
unsigned PeriodicWave::MaxNumberOfPartials() const {
  // One partial per FFT bin below Nyquist.
  return PeriodicWaveSize() / 2;
}
+
// Selects the two adjacent band-limited tables bracketing
// |fundamental_frequency| and computes the 0..1 interpolation factor between
// them. "Higher" wave data has more partials (lower range index); "lower" has
// fewer (higher range index).
void PeriodicWave::WaveDataForFundamentalFrequency(
    float fundamental_frequency,
    float*& lower_wave_data,
    float*& higher_wave_data,
    float& table_interpolation_factor) {
  // Negative frequencies are allowed, in which case we alias to the positive
  // frequency.
  fundamental_frequency = fabsf(fundamental_frequency);

  // Calculate the pitch range. A zero frequency maps to a fixed small ratio
  // so the log below stays finite.
  float ratio = fundamental_frequency > 0
                    ? fundamental_frequency / lowest_fundamental_frequency_
                    : 0.5;
  float cents_above_lowest_frequency = log2f(ratio) * 1200;

  // Add one to round-up to the next range just in time to truncate partials
  // before aliasing occurs.
  float pitch_range = 1 + cents_above_lowest_frequency / cents_per_range_;

  // Clamp into the valid range-index interval.
  pitch_range = std::max(pitch_range, 0.0f);
  pitch_range = std::min(pitch_range, static_cast<float>(NumberOfRanges() - 1));

  // The words "lower" and "higher" refer to the table data having the lower and
  // higher numbers of partials. It's a little confusing since the range index
  // gets larger the more partials we cull out. So the lower table data will
  // have a larger range index.
  unsigned range_index1 = static_cast<unsigned>(pitch_range);
  unsigned range_index2 =
      range_index1 < NumberOfRanges() - 1 ? range_index1 + 1 : range_index1;

  lower_wave_data = band_limited_tables_[range_index2]->Data();
  higher_wave_data = band_limited_tables_[range_index1]->Data();

  // Ranges from 0 -> 1 to interpolate between lower -> higher.
  table_interpolation_factor = pitch_range - range_index1;
}
+
+unsigned PeriodicWave::NumberOfPartialsForRange(unsigned range_index) const {
+ // Number of cents below nyquist where we cull partials.
+ float cents_to_cull = range_index * cents_per_range_;
+
+ // A value from 0 -> 1 representing what fraction of the partials to keep.
+ float culling_scale = pow(2, -cents_to_cull / 1200);
+
+ // The very top range will have all the partials culled.
+ unsigned number_of_partials = culling_scale * MaxNumberOfPartials();
+
+ return number_of_partials;
+}
+
// Tell V8 about the memory we're using so it can properly schedule garbage
// collects.
void PeriodicWave::AdjustV8ExternalMemory(int delta) {
  // NOTE(review): |delta| is an int, but the destructor passes
  // -static_cast<int64_t>(v8_external_memory_) and
  // AdjustAmountOfExternalAllocatedMemory takes a wider integer; for very
  // large accumulated totals this narrows. Confirm whether the parameter
  // should be int64_t (requires a matching header change).
  v8::Isolate::GetCurrent()->AdjustAmountOfExternalAllocatedMemory(delta);
  v8_external_memory_ += delta;
}
+
+// Convert into time-domain wave buffers. One table is created for each range
+// for non-aliasing playback at different playback rates. Thus, higher ranges
+// have more high-frequency partials culled out.
// Convert into time-domain wave buffers. One table is created for each range
// for non-aliasing playback at different playback rates. Thus, higher ranges
// have more high-frequency partials culled out.
void PeriodicWave::CreateBandLimitedTables(const float* real_data,
                                           const float* imag_data,
                                           unsigned number_of_components,
                                           bool disable_normalization) {
  // TODO(rtoy): Figure out why this needs to be 0.5 when normalization is
  // disabled.
  float normalization_scale = 0.5;

  unsigned fft_size = PeriodicWaveSize();
  unsigned half_size = fft_size / 2;
  unsigned i;

  // At most half_size complex components (bins below Nyquist) can be used.
  number_of_components = std::min(number_of_components, half_size);

  band_limited_tables_.ReserveCapacity(NumberOfRanges());

  FFTFrame frame(fft_size);
  for (unsigned range_index = 0; range_index < NumberOfRanges();
       ++range_index) {
    // This FFTFrame is used to cull partials (represented by frequency bins).
    float* real_p = frame.RealData();
    float* imag_p = frame.ImagData();

    // Copy from loaded frequency data and generate the complex conjugate
    // because of the way the inverse FFT is defined versus the values in the
    // arrays. Need to scale the data by fftSize to remove the scaling that the
    // inverse IFFT would do.
    float scale = fft_size;
    Vsmul(real_data, 1, &scale, real_p, 1, number_of_components);
    // Negating the scale for the imaginary part forms the conjugate.
    scale = -scale;
    Vsmul(imag_data, 1, &scale, imag_p, 1, number_of_components);

    // Find the starting bin where we should start culling. We need to clear
    // out the highest frequencies to band-limit the waveform.
    unsigned number_of_partials = NumberOfPartialsForRange(range_index);

    // If fewer components were provided than 1/2 FFT size, then clear the
    // remaining bins. We also need to cull the aliasing partials for this
    // pitch range.
    for (i = std::min(number_of_components, number_of_partials + 1);
         i < half_size; ++i) {
      real_p[i] = 0;
      imag_p[i] = 0;
    }

    // Clear packed-nyquist and any DC-offset.
    real_p[0] = 0;
    imag_p[0] = 0;

    // Create the band-limited table.
    unsigned wave_size = PeriodicWaveSize();
    std::unique_ptr<AudioFloatArray> table =
        std::make_unique<AudioFloatArray>(wave_size);
    // Report the table allocation to V8's external-memory bookkeeping.
    AdjustV8ExternalMemory(wave_size * sizeof(float));
    band_limited_tables_.push_back(std::move(table));

    // Apply an inverse FFT to generate the time-domain table data.
    float* data = band_limited_tables_[range_index]->Data();
    frame.DoInverseFFT(data);

    // For the first range (which has the highest power), calculate its peak
    // value then compute normalization scale.
    if (!disable_normalization) {
      if (!range_index) {
        float max_value;
        Vmaxmgv(data, 1, &max_value, fft_size);

        if (max_value)
          normalization_scale = 1.0f / max_value;
      }
    }

    // Apply normalization scale.
    Vsmul(data, 1, &normalization_scale, data, 1, fft_size);
  }
}
+
// Fills the wave tables with the Fourier series of one of the standard
// oscillator shapes (sine, square, sawtooth, triangle); amplitude
// normalization happens later in CreateBandLimitedTables().
void PeriodicWave::GenerateBasicWaveform(int shape) {
  unsigned fft_size = PeriodicWaveSize();
  unsigned half_size = fft_size / 2;

  AudioFloatArray real(half_size);
  AudioFloatArray imag(half_size);
  float* real_p = real.Data();
  float* imag_p = imag.Data();

  // Clear DC and Nyquist.
  real_p[0] = 0;
  imag_p[0] = 0;

  for (unsigned n = 1; n < half_size; ++n) {
    // Common factor 2/(n*pi) appearing in all the series below.
    float pi_factor = 2 / (n * piFloat);

    // All waveforms are odd functions with a positive slope at time 0. Hence
    // the coefficients for cos() are always 0.

    // Fourier coefficients according to standard definition:
    //   b = 1/pi*integrate(f(x)*sin(n*x), x, -pi, pi)
    //     = 2/pi*integrate(f(x)*sin(n*x), x, 0, pi)
    // since f(x) is an odd function.

    float b;  // Coefficient for sin().

    // Calculate Fourier coefficients depending on the shape. Note that the
    // overall scaling (magnitude) of the waveforms is normalized in
    // createBandLimitedTables().
    switch (shape) {
      case OscillatorHandler::SINE:
        // Standard sine wave function.
        b = (n == 1) ? 1 : 0;
        break;
      case OscillatorHandler::SQUARE:
        // Square-shaped waveform with the first half its maximum value and the
        // second half its minimum value.
        //
        // See http://mathworld.wolfram.com/FourierSeriesSquareWave.html
        //
        // b[n] = 2/n/pi*(1-(-1)^n)
        //      = 4/n/pi for n odd and 0 otherwise.
        //      = 2*(2/(n*pi)) for n odd
        b = (n & 1) ? 2 * pi_factor : 0;
        break;
      case OscillatorHandler::SAWTOOTH:
        // Sawtooth-shaped waveform with the first half ramping from zero to
        // maximum and the second half from minimum to zero.
        //
        // b[n] = -2*(-1)^n/pi/n
        //      = (2/(n*pi))*(-1)^(n+1)
        b = pi_factor * ((n & 1) ? 1 : -1);
        break;
      case OscillatorHandler::TRIANGLE:
        // Triangle-shaped waveform going from 0 at time 0 to 1 at time pi/2 and
        // back to 0 at time pi.
        //
        // See http://mathworld.wolfram.com/FourierSeriesTriangleWave.html
        //
        // b[n] = 8*sin(pi*k/2)/(pi*k)^2
        //      = 8/pi^2/n^2*(-1)^((n-1)/2) for n odd and 0 otherwise
        //      = 2*(2/(n*pi))^2 * (-1)^((n-1)/2)
        if (n & 1) {
          b = 2 * (pi_factor * pi_factor) * ((((n - 1) >> 1) & 1) ? -1 : 1);
        } else {
          b = 0;
        }
        break;
      default:
        NOTREACHED();
        b = 0;
        break;
    }

    real_p[n] = 0;
    imag_p[n] = b;
  }

  CreateBandLimitedTables(real_p, imag_p, half_size, false);
}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h
new file mode 100644
index 00000000000..0dd89cfd84a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PERIODIC_WAVE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PERIODIC_WAVE_H_
+
+#include <memory>
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/platform/audio/audio_array.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+class PeriodicWaveOptions;
+
class PeriodicWave final : public ScriptWrappable {
  DEFINE_WRAPPERTYPEINFO();

 public:
  // Factories for the four built-in oscillator waveforms.
  static PeriodicWave* CreateSine(float sample_rate);
  static PeriodicWave* CreateSquare(float sample_rate);
  static PeriodicWave* CreateSawtooth(float sample_rate);
  static PeriodicWave* CreateTriangle(float sample_rate);

  // Creates an arbitrary periodic wave given the frequency components (Fourier
  // coefficients).
  static PeriodicWave* Create(BaseAudioContext&,
                              const Vector<float>& real,
                              const Vector<float>& imag,
                              bool normalize,
                              ExceptionState&);

  // Dictionary-based factory used by the IDL-generated constructor.
  static PeriodicWave* Create(BaseAudioContext*,
                              const PeriodicWaveOptions&,
                              ExceptionState&);

  virtual ~PeriodicWave();

  // Returns pointers to the lower and higher wave data for the pitch range
  // containing the given fundamental frequency. These two tables are in
  // adjacent "pitch" ranges where the higher table will have the maximum number
  // of partials which won't alias when played back at this fundamental
  // frequency. The lower wave is the next range containing fewer partials than
  // the higher wave. Interpolation between these two tables can be made
  // according to tableInterpolationFactor.
  // Where values from 0 -> 1 interpolate between lower -> higher.
  void WaveDataForFundamentalFrequency(float,
                                       float*& lower_wave_data,
                                       float*& higher_wave_data,
                                       float& table_interpolation_factor);

  // Returns the scalar multiplier to the oscillator frequency to calculate wave
  // buffer phase increment.
  float RateScale() const { return rate_scale_; }

  // The size of the FFT to use based on the sampling rate.
  unsigned PeriodicWaveSize() const;

  // The number of ranges needed for the given sampling rate and FFT size.
  unsigned NumberOfRanges() const { return number_of_ranges_; }

 private:
  explicit PeriodicWave(float sample_rate);

  // Fills the tables for one of OscillatorHandler's basic shapes.
  void GenerateBasicWaveform(int);

  // Running total of bytes reported to V8 via AdjustV8ExternalMemory().
  size_t v8_external_memory_;

  float sample_rate_;
  unsigned number_of_ranges_;
  float cents_per_range_;

  // The lowest frequency (in Hertz) where playback will include all of the
  // partials. Playing back lower than this frequency will gradually lose more
  // high-frequency information. This frequency is quite low (~10Hz @ 44.1KHz)
  float lowest_fundamental_frequency_;

  float rate_scale_;

  // Maximum possible number of partials (before culling).
  unsigned MaxNumberOfPartials() const;

  unsigned NumberOfPartialsForRange(unsigned range_index) const;

  // Reports |delta| bytes of externally allocated memory to the V8 isolate.
  void AdjustV8ExternalMemory(int delta);

  // Creates tables based on numberOfComponents Fourier coefficients.
  void CreateBandLimitedTables(const float* real,
                               const float* imag,
                               unsigned number_of_components,
                               bool disable_normalization);
  Vector<std::unique_ptr<AudioFloatArray>> band_limited_tables_;
};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_PERIODIC_WAVE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.idl b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.idl
new file mode 100644
index 00000000000..f640cfa40df
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave.idl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// PeriodicWave represents a periodic audio waveform given by its Fourier coefficients.
+// See https://webaudio.github.io/web-audio-api/#periodicwave
[
    Constructor(BaseAudioContext context, optional PeriodicWaveOptions options),
    RaisesException=Constructor,
    Measure
]
interface PeriodicWave {
    // No members: the Fourier coefficients are supplied at construction time
    // (via PeriodicWaveOptions) and are not re-exposed to script.
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_constraints.idl b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_constraints.idl
new file mode 100644
index 00000000000..d5cf4526539
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_constraints.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-periodicwaveconstraints
dictionary PeriodicWaveConstraints {
    // When true, the generated waveform's amplitude normalization is skipped
    // (see PeriodicWave::CreateBandLimitedTables in periodic_wave.cc).
    boolean disableNormalization = false;
};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_options.idl
new file mode 100644
index 00000000000..e452f315ae1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/periodic_wave_options.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-periodicwaveoptions
+dictionary PeriodicWaveOptions : PeriodicWaveConstraints {
+ sequence<float> real;
+ sequence<float> imag;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc
new file mode 100644
index 00000000000..f061b2e7b50
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.cc
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <limits.h>
+#include <algorithm>
+#include <complex>
+#include "third_party/blink/renderer/modules/webaudio/realtime_analyser.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/audio/vector_math.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
// Default smoothing/decibel parameters per the AnalyserNode spec defaults.
const double RealtimeAnalyser::kDefaultSmoothingTimeConstant = 0.8;
const double RealtimeAnalyser::kDefaultMinDecibels = -100;
const double RealtimeAnalyser::kDefaultMaxDecibels = -30;

const unsigned RealtimeAnalyser::kDefaultFFTSize = 2048;
// All FFT implementations are expected to handle power-of-two sizes
// MinFFTSize <= size <= MaxFFTSize.
const unsigned RealtimeAnalyser::kMinFFTSize = 32;
const unsigned RealtimeAnalyser::kMaxFFTSize = 32768;
// The ring buffer holds two maximal FFTs' worth of recent input samples.
const unsigned RealtimeAnalyser::kInputBufferSize =
    RealtimeAnalyser::kMaxFFTSize * 2;
+
RealtimeAnalyser::RealtimeAnalyser()
    : input_buffer_(kInputBufferSize),
      write_index_(0),
      // Analysis is mono: input is down-mixed into this single-channel bus.
      down_mix_bus_(AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames)),
      fft_size_(kDefaultFFTSize),
      magnitude_buffer_(kDefaultFFTSize / 2),
      smoothing_time_constant_(kDefaultSmoothingTimeConstant),
      min_decibels_(kDefaultMinDecibels),
      max_decibels_(kDefaultMaxDecibels),
      // -1 so the first Get*FrequencyData call always runs an analysis.
      last_analysis_time_(-1) {
  analysis_frame_ = std::make_unique<FFTFrame>(kDefaultFFTSize);
}
+
+bool RealtimeAnalyser::SetFftSize(size_t size) {
+ DCHECK(IsMainThread());
+
+ // Only allow powers of two within the allowed range.
+ if (size > kMaxFFTSize || size < kMinFFTSize ||
+ !AudioUtilities::IsPowerOfTwo(size))
+ return false;
+
+ if (fft_size_ != size) {
+ analysis_frame_ = std::make_unique<FFTFrame>(size);
+ // m_magnitudeBuffer has size = fftSize / 2 because it contains floats
+ // reduced from complex values in m_analysisFrame.
+ magnitude_buffer_.Allocate(size / 2);
+ fft_size_ = size;
+ }
+
+ return true;
+}
+
// Down-mixes |frames_to_process| frames of |bus| to mono and appends them to
// the circular input buffer at the current write index.
void RealtimeAnalyser::WriteInput(AudioBus* bus, size_t frames_to_process) {
  bool is_bus_good = bus && bus->NumberOfChannels() > 0 &&
                     bus->Channel(0)->length() >= frames_to_process;
  DCHECK(is_bus_good);
  if (!is_bus_good)
    return;

  unsigned write_index = GetWriteIndex();
  // FIXME: support chunk sizes that do not evenly divide the FFT size.
  bool is_destination_good =
      write_index < input_buffer_.size() &&
      write_index + frames_to_process <= input_buffer_.size();
  DCHECK(is_destination_good);
  if (!is_destination_good)
    return;

  // Perform real-time analysis
  float* dest = input_buffer_.Data() + write_index;

  // Clear the bus and downmix the input according to the down mixing rules.
  // Then save the result in the m_inputBuffer at the appropriate place.
  down_mix_bus_->Zero();
  down_mix_bus_->SumFrom(*bus);
  memcpy(dest, down_mix_bus_->Channel(0)->Data(),
         frames_to_process * sizeof(*dest));

  // Wrap the circular buffer back to the start when we reach the end.
  write_index += frames_to_process;
  if (write_index >= kInputBufferSize)
    write_index = 0;
  SetWriteIndex(write_index);
}
+
+namespace {
+
+void ApplyWindow(float* p, size_t n) {
+ DCHECK(IsMainThread());
+
+ // Blackman window
+ double alpha = 0.16;
+ double a0 = 0.5 * (1 - alpha);
+ double a1 = 0.5;
+ double a2 = 0.5 * alpha;
+
+ for (unsigned i = 0; i < n; ++i) {
+ double x = static_cast<double>(i) / static_cast<double>(n);
+ double window =
+ a0 - a1 * cos(twoPiDouble * x) + a2 * cos(twoPiDouble * 2.0 * x);
+ p[i] *= float(window);
+ }
+}
+
+} // namespace
+
// Runs one windowed FFT over the most recent fftSize input samples and folds
// the resulting magnitudes into |magnitude_buffer_| with exponential
// smoothing.
void RealtimeAnalyser::DoFFTAnalysis() {
  DCHECK(IsMainThread());

  // Unroll the input buffer into a temporary buffer, where we'll apply an
  // analysis window followed by an FFT.
  size_t fft_size = this->FftSize();

  AudioFloatArray temporary_buffer(fft_size);
  float* input_buffer = input_buffer_.Data();
  float* temp_p = temporary_buffer.Data();

  // Take the previous fftSize values from the input buffer and copy into the
  // temporary buffer.
  unsigned write_index = GetWriteIndex();
  if (write_index < fft_size) {
    // The window of samples wraps around the end of the ring buffer, so the
    // copy is done in two pieces.
    memcpy(temp_p, input_buffer + write_index - fft_size + kInputBufferSize,
           sizeof(*temp_p) * (fft_size - write_index));
    memcpy(temp_p + fft_size - write_index, input_buffer,
           sizeof(*temp_p) * write_index);
  } else {
    memcpy(temp_p, input_buffer + write_index - fft_size,
           sizeof(*temp_p) * fft_size);
  }

  // Window the input samples.
  ApplyWindow(temp_p, fft_size);

  // Do the analysis.
  analysis_frame_->DoFFT(temp_p);

  const float* real_p = analysis_frame_->RealData();
  float* imag_p = analysis_frame_->ImagData();

  // Blow away the packed nyquist component.
  imag_p[0] = 0;

  // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT
  // scaling factor).
  const double magnitude_scale = 1.0 / fft_size;

  // A value of 0 does no averaging with the previous result. Larger values
  // produce slower, but smoother changes.
  double k = smoothing_time_constant_;
  k = std::max(0.0, k);
  k = std::min(1.0, k);

  // Convert the analysis data from complex to magnitude and average with the
  // previous result.
  float* destination = MagnitudeBuffer().Data();
  size_t n = MagnitudeBuffer().size();
  for (size_t i = 0; i < n; ++i) {
    std::complex<double> c(real_p[i], imag_p[i]);
    double scalar_magnitude = abs(c) * magnitude_scale;
    destination[i] = float(k * destination[i] + (1 - k) * scalar_magnitude);
  }
}
+
+void RealtimeAnalyser::ConvertFloatToDb(DOMFloat32Array* destination_array) {
+ // Convert from linear magnitude to floating-point decibels.
+ unsigned source_length = MagnitudeBuffer().size();
+ size_t len = std::min(source_length, destination_array->length());
+ if (len > 0) {
+ const float* source = MagnitudeBuffer().Data();
+ float* destination = destination_array->Data();
+
+ for (unsigned i = 0; i < len; ++i) {
+ float linear_value = source[i];
+ double db_mag = AudioUtilities::LinearToDecibels(linear_value);
+ destination[i] = float(db_mag);
+ }
+ }
+}
+
+void RealtimeAnalyser::GetFloatFrequencyData(DOMFloat32Array* destination_array,
+ double current_time) {
+ DCHECK(IsMainThread());
+ DCHECK(destination_array);
+
+ if (current_time <= last_analysis_time_) {
+ ConvertFloatToDb(destination_array);
+ return;
+ }
+
+ // Time has advanced since the last call; update the FFT data.
+ last_analysis_time_ = current_time;
+ DoFFTAnalysis();
+
+ ConvertFloatToDb(destination_array);
+}
+
+void RealtimeAnalyser::ConvertToByteData(DOMUint8Array* destination_array) {
+ // Convert from linear magnitude to unsigned-byte decibels.
+ unsigned source_length = MagnitudeBuffer().size();
+ size_t len = std::min(source_length, destination_array->length());
+ if (len > 0) {
+ const double range_scale_factor = max_decibels_ == min_decibels_
+ ? 1
+ : 1 / (max_decibels_ - min_decibels_);
+ const double min_decibels = min_decibels_;
+
+ const float* source = MagnitudeBuffer().Data();
+ unsigned char* destination = destination_array->Data();
+
+ for (unsigned i = 0; i < len; ++i) {
+ float linear_value = source[i];
+ double db_mag = AudioUtilities::LinearToDecibels(linear_value);
+
+ // The range m_minDecibels to m_maxDecibels will be scaled to byte values
+ // from 0 to UCHAR_MAX.
+ double scaled_value =
+ UCHAR_MAX * (db_mag - min_decibels) * range_scale_factor;
+
+ // Clip to valid range.
+ if (scaled_value < 0)
+ scaled_value = 0;
+ if (scaled_value > UCHAR_MAX)
+ scaled_value = UCHAR_MAX;
+
+ destination[i] = static_cast<unsigned char>(scaled_value);
+ }
+ }
+}
+
+void RealtimeAnalyser::GetByteFrequencyData(DOMUint8Array* destination_array,
+ double current_time) {
+ DCHECK(IsMainThread());
+ DCHECK(destination_array);
+
+ if (current_time <= last_analysis_time_) {
+ // FIXME: Is it worth caching the data so we don't have to do the conversion
+ // every time? Perhaps not, since we expect many calls in the same
+ // rendering quantum.
+ ConvertToByteData(destination_array);
+ return;
+ }
+
+ // Time has advanced since the last call; update the FFT data.
+ last_analysis_time_ = current_time;
+ DoFFTAnalysis();
+
+ ConvertToByteData(destination_array);
+}
+
// Copies the most recent fftSize time-domain samples (raw floats) from the
// circular input buffer into |destination_array|, up to its length.
void RealtimeAnalyser::GetFloatTimeDomainData(
    DOMFloat32Array* destination_array) {
  DCHECK(IsMainThread());
  DCHECK(destination_array);

  unsigned fft_size = this->FftSize();
  size_t len = std::min(fft_size, destination_array->length());
  if (len > 0) {
    bool is_input_buffer_good = input_buffer_.size() == kInputBufferSize &&
                                input_buffer_.size() > fft_size;
    DCHECK(is_input_buffer_good);
    if (!is_input_buffer_good)
      return;

    float* input_buffer = input_buffer_.Data();
    float* destination = destination_array->Data();

    unsigned write_index = GetWriteIndex();

    for (unsigned i = 0; i < len; ++i) {
      // Buffer access is protected due to modulo operation.
      float value =
          input_buffer[(i + write_index - fft_size + kInputBufferSize) %
                       kInputBufferSize];

      destination[i] = value;
    }
  }
}
+
// Copies the most recent fftSize time-domain samples into |destination_array|
// as unsigned bytes, mapping the nominal [-1, 1] range onto [0, UCHAR_MAX].
void RealtimeAnalyser::GetByteTimeDomainData(DOMUint8Array* destination_array) {
  DCHECK(IsMainThread());
  DCHECK(destination_array);

  unsigned fft_size = this->FftSize();
  size_t len = std::min(fft_size, destination_array->length());
  if (len > 0) {
    bool is_input_buffer_good = input_buffer_.size() == kInputBufferSize &&
                                input_buffer_.size() > fft_size;
    DCHECK(is_input_buffer_good);
    if (!is_input_buffer_good)
      return;

    float* input_buffer = input_buffer_.Data();
    unsigned char* destination = destination_array->Data();

    unsigned write_index = GetWriteIndex();

    for (unsigned i = 0; i < len; ++i) {
      // Buffer access is protected due to modulo operation.
      float value =
          input_buffer[(i + write_index - fft_size + kInputBufferSize) %
                       kInputBufferSize];

      // Scale from nominal -1 -> +1 to unsigned byte.
      double scaled_value = 128 * (value + 1);

      // Clip to valid range.
      if (scaled_value < 0)
        scaled_value = 0;
      if (scaled_value > UCHAR_MAX)
        scaled_value = UCHAR_MAX;

      destination[i] = static_cast<unsigned char>(scaled_value);
    }
  }
}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h
new file mode 100644
index 00000000000..295b28a4caa
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/realtime_analyser.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_REALTIME_ANALYSER_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_REALTIME_ANALYSER_H_
+
+#include <memory>
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/platform/audio/audio_array.h"
+#include "third_party/blink/renderer/platform/audio/fft_frame.h"
+#include "third_party/blink/renderer/platform/wtf/noncopyable.h"
+
+namespace blink {
+
+class AudioBus;
+
+// Analysis engine backing AnalyserNode.  The audio thread streams samples
+// into an internal ring buffer via WriteInput(); the main thread pulls
+// time-domain or FFT frequency-domain snapshots through the Get*Data()
+// accessors.  Cross-thread synchronization is limited to the
+// acquire/release pair on |write_index_|.
+class RealtimeAnalyser final {
+  WTF_MAKE_NONCOPYABLE(RealtimeAnalyser);
+  DISALLOW_NEW();
+
+ public:
+  RealtimeAnalyser();
+
+  // FFT size in sample-frames.  SetFftSize() returns false if the requested
+  // size is rejected (validation lives in the .cc).
+  size_t FftSize() const { return fft_size_; }
+  bool SetFftSize(size_t);
+
+  // Half the FFT size: the number of frequency-domain bins exposed to JS.
+  unsigned FrequencyBinCount() const { return fft_size_ / 2; }
+
+  // dB range used when quantizing magnitudes for GetByteFrequencyData().
+  void SetMinDecibels(double k) { min_decibels_ = k; }
+  double MinDecibels() const { return min_decibels_; }
+
+  void SetMaxDecibels(double k) { max_decibels_ = k; }
+  double MaxDecibels() const { return max_decibels_; }
+
+  void SetSmoothingTimeConstant(double k) { smoothing_time_constant_ = k; }
+  double SmoothingTimeConstant() const { return smoothing_time_constant_; }
+
+  // Main-thread accessors.  The double parameter is a time value --
+  // presumably the context's current time, used with |last_analysis_time_|
+  // to throttle re-analysis; confirm against the .cc.
+  void GetFloatFrequencyData(DOMFloat32Array*, double);
+  void GetByteFrequencyData(DOMUint8Array*, double);
+  void GetFloatTimeDomainData(DOMFloat32Array*);
+  void GetByteTimeDomainData(DOMUint8Array*);
+
+  // The audio thread writes input data here.
+  void WriteInput(AudioBus*, size_t frames_to_process);
+
+  static const double kDefaultSmoothingTimeConstant;
+  static const double kDefaultMinDecibels;
+  static const double kDefaultMaxDecibels;
+
+  static const unsigned kDefaultFFTSize;
+  static const unsigned kMinFFTSize;
+  static const unsigned kMaxFFTSize;
+  static const unsigned kInputBufferSize;
+
+ private:
+  // The audio thread writes the input audio here.
+  AudioFloatArray input_buffer_;
+  // Ring-buffer write position.  Published by the audio thread and read by
+  // the main thread, hence the acquire/release accessors below.
+  unsigned write_index_;
+
+  unsigned GetWriteIndex() const { return AcquireLoad(&write_index_); }
+  void SetWriteIndex(unsigned new_index) {
+    ReleaseStore(&write_index_, new_index);
+  }
+
+  // Input audio is downmixed to this bus before copying to |input_buffer_|.
+  scoped_refptr<AudioBus> down_mix_bus_;
+
+  size_t fft_size_;
+  std::unique_ptr<FFTFrame> analysis_frame_;
+  void DoFFTAnalysis();
+
+  // Convert the contents of |magnitude_buffer_| to byte values, saving the
+  // result in |destination|.
+  void ConvertToByteData(DOMUint8Array* destination);
+
+  // Convert |magnitude_buffer_| to dB, saving the result in |destination|.
+  void ConvertFloatToDb(DOMFloat32Array* destination);
+
+  // DoFFTAnalysis() stores the floating-point magnitude analysis data here.
+  AudioFloatArray magnitude_buffer_;
+  AudioFloatArray& MagnitudeBuffer() { return magnitude_buffer_; }
+
+  // A value between 0 and 1 which averages the previous version of
+  // |magnitude_buffer_| with the current analysis magnitude data.
+  double smoothing_time_constant_;
+
+  // The range used when converting when using getByteFrequencyData().
+  double min_decibels_;
+  double max_decibels_;
+
+  // Time at which the FFT was last computed.
+  double last_analysis_time_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_REALTIME_ANALYSER_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc
new file mode 100644
index 00000000000..445f8a5853d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.cc
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/script_processor_node.h"
+
+#include <memory>
+
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_buffer.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_processing_event.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/platform/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/waitable_event.h"
+
+namespace blink {
+
+// Main-thread construction of the audio-thread-side handler.  Sets up one
+// input, one output, and an internal bus used to hand input samples to the
+// JS-visible double buffers.
+ScriptProcessorHandler::ScriptProcessorHandler(
+    AudioNode& node,
+    float sample_rate,
+    size_t buffer_size,
+    unsigned number_of_input_channels,
+    unsigned number_of_output_channels)
+    : AudioHandler(kNodeTypeScriptProcessor, node, sample_rate),
+      double_buffer_index_(0),
+      buffer_size_(buffer_size),
+      buffer_read_write_index_(0),
+      number_of_input_channels_(number_of_input_channels),
+      number_of_output_channels_(number_of_output_channels),
+      // The |false| argument means the bus does not allocate channel memory;
+      // Process() points its channels at the current input AudioBuffer via
+      // SetChannelMemory() each quantum.
+      internal_input_bus_(AudioBus::Create(number_of_input_channels,
+                                           AudioUtilities::kRenderQuantumFrames,
+                                           false)) {
+  // Regardless of the allowed buffer sizes, we still need to process at the
+  // granularity of the AudioNode.
+  if (buffer_size_ < AudioUtilities::kRenderQuantumFrames)
+    buffer_size_ = AudioUtilities::kRenderQuantumFrames;
+
+  DCHECK_LE(number_of_input_channels, BaseAudioContext::MaxNumberOfChannels());
+
+  AddInput();
+  AddOutput(number_of_output_channels);
+
+  // channelCount is fixed to the number of input channels; see
+  // SetChannelCount()/SetChannelCountMode(), which reject changes.
+  channel_count_ = number_of_input_channels;
+  SetInternalChannelCountMode(kExplicit);
+
+  // Task runner used to post onaudioprocess event dispatch to the main
+  // thread from the audio thread (see Process()).
+  if (Context()->GetExecutionContext()) {
+    task_runner_ = Context()->GetExecutionContext()->GetTaskRunner(
+        TaskType::kMediaElementEvent);
+  }
+
+  Initialize();
+}
+
+// Factory wrapper: the constructor is private, so all creation funnels
+// through here and returns a ref-counted handle.
+scoped_refptr<ScriptProcessorHandler> ScriptProcessorHandler::Create(
+    AudioNode& node,
+    float sample_rate,
+    size_t buffer_size,
+    unsigned number_of_input_channels,
+    unsigned number_of_output_channels) {
+  return base::AdoptRef(new ScriptProcessorHandler(
+      node, sample_rate, buffer_size, number_of_input_channels,
+      number_of_output_channels));
+}
+
+// Tears down the AudioHandler state (inputs/outputs) set up in Initialize().
+ScriptProcessorHandler::~ScriptProcessorHandler() {
+  Uninitialize();
+}
+
+// Allocates the two pairs of JS-visible double buffers (index 0 and 1).
+// A side with zero channels gets a null buffer, which Process() tolerates.
+void ScriptProcessorHandler::Initialize() {
+  if (IsInitialized())
+    return;
+
+  float sample_rate = Context()->sampleRate();
+
+  // Create double buffers on both the input and output sides.
+  // These AudioBuffers will be directly accessed in the main thread by
+  // JavaScript.
+  for (unsigned i = 0; i < 2; ++i) {
+    AudioBuffer* input_buffer =
+        number_of_input_channels_
+            ? AudioBuffer::Create(number_of_input_channels_, BufferSize(),
+                                  sample_rate)
+            : nullptr;
+    AudioBuffer* output_buffer =
+        number_of_output_channels_
+            ? AudioBuffer::Create(number_of_output_channels_, BufferSize(),
+                                  sample_rate)
+            : nullptr;
+
+    input_buffers_.push_back(input_buffer);
+    output_buffers_.push_back(output_buffer);
+  }
+
+  AudioHandler::Initialize();
+}
+
+// Audio-thread render callback, invoked once per render quantum.  Streams
+// quantum-sized slices into/out of the current double buffer; when a buffer
+// fills, posts an onaudioprocess event to the main thread and swaps buffers.
+void ScriptProcessorHandler::Process(size_t frames_to_process) {
+  // Discussion about inputs and outputs:
+  // As in other AudioNodes, ScriptProcessorNode uses an AudioBus for its input
+  // and output (see inputBus and outputBus below). Additionally, there is a
+  // double-buffering for input and output which is exposed directly to
+  // JavaScript (see inputBuffer and outputBuffer below). This node is the
+  // producer for inputBuffer and the consumer for outputBuffer. The JavaScript
+  // code is the consumer of inputBuffer and the producer for outputBuffer.
+
+  // Get input and output busses.
+  AudioBus* input_bus = Input(0).Bus();
+  AudioBus* output_bus = Output(0).Bus();
+
+  // Get input and output buffers. We double-buffer both the input and output
+  // sides.
+  unsigned double_buffer_index = this->DoubleBufferIndex();
+  bool is_double_buffer_index_good =
+      double_buffer_index < 2 && double_buffer_index < input_buffers_.size() &&
+      double_buffer_index < output_buffers_.size();
+  DCHECK(is_double_buffer_index_good);
+  if (!is_double_buffer_index_good)
+    return;
+
+  AudioBuffer* input_buffer = input_buffers_[double_buffer_index].Get();
+  AudioBuffer* output_buffer = output_buffers_[double_buffer_index].Get();
+
+  // Check the consistency of input and output buffers.
+  unsigned number_of_input_channels = internal_input_bus_->NumberOfChannels();
+  bool buffers_are_good =
+      output_buffer && BufferSize() == output_buffer->length() &&
+      buffer_read_write_index_ + frames_to_process <= BufferSize();
+
+  // If the number of input channels is zero, it's ok to have inputBuffer = 0.
+  if (internal_input_bus_->NumberOfChannels())
+    buffers_are_good = buffers_are_good && input_buffer &&
+                       BufferSize() == input_buffer->length();
+
+  DCHECK(buffers_are_good);
+  if (!buffers_are_good)
+    return;
+
+  // We assume that bufferSize() is evenly divisible by framesToProcess - should
+  // always be true, but we should still check.
+  bool is_frames_to_process_good = frames_to_process &&
+                                   BufferSize() >= frames_to_process &&
+                                   !(BufferSize() % frames_to_process);
+  DCHECK(is_frames_to_process_good);
+  if (!is_frames_to_process_good)
+    return;
+
+  unsigned number_of_output_channels = output_bus->NumberOfChannels();
+
+  bool channels_are_good =
+      (number_of_input_channels == number_of_input_channels_) &&
+      (number_of_output_channels == number_of_output_channels_);
+  DCHECK(channels_are_good);
+  if (!channels_are_good)
+    return;
+
+  // Point the internal bus's channels at the current write offset inside the
+  // input AudioBuffer, then copy this quantum of input into it.
+  for (unsigned i = 0; i < number_of_input_channels; ++i)
+    internal_input_bus_->SetChannelMemory(
+        i,
+        input_buffer->getChannelData(i).View()->Data() +
+            buffer_read_write_index_,
+        frames_to_process);
+
+  if (number_of_input_channels)
+    internal_input_bus_->CopyFrom(*input_bus);
+
+  // Copy from the output buffer to the output.
+  for (unsigned i = 0; i < number_of_output_channels; ++i) {
+    memcpy(output_bus->Channel(i)->MutableData(),
+           output_buffer->getChannelData(i).View()->Data() +
+               buffer_read_write_index_,
+           sizeof(float) * frames_to_process);
+  }
+
+  // Update the buffering index.
+  buffer_read_write_index_ =
+      (buffer_read_write_index_ + frames_to_process) % BufferSize();
+
+  // m_bufferReadWriteIndex will wrap back around to 0 when the current input
+  // and output buffers are full.
+  // When this happens, fire an event and swap buffers.
+  if (!buffer_read_write_index_) {
+    // Avoid building up requests on the main thread to fire process events when
+    // they're not being handled. This could be a problem if the main thread is
+    // very busy doing other things and is being held up handling previous
+    // requests. The audio thread can't block on this lock, so we call
+    // tryLock() instead.
+    MutexTryLocker try_locker(process_event_lock_);
+    if (!try_locker.Locked()) {
+      // We're late in handling the previous request. The main thread must be
+      // very busy. The best we can do is clear out the buffer ourself here.
+      output_buffer->Zero();
+    } else {
+      // With the realtime context, execute the script code asynchronously
+      // and do not wait.
+      if (Context()->HasRealtimeConstraint()) {
+        // Fire the event on the main thread with the appropriate buffer
+        // index.
+        PostCrossThreadTask(
+            *task_runner_, FROM_HERE,
+            CrossThreadBind(&ScriptProcessorHandler::FireProcessEvent,
+                            WrapRefCounted(this), double_buffer_index_));
+      } else {
+        // If this node is in the offline audio context, use the
+        // waitable event to synchronize to the offline rendering thread.
+        std::unique_ptr<WaitableEvent> waitable_event =
+            std::make_unique<WaitableEvent>();
+
+        PostCrossThreadTask(
+            *task_runner_, FROM_HERE,
+            CrossThreadBind(
+                &ScriptProcessorHandler::FireProcessEventForOfflineAudioContext,
+                WrapRefCounted(this), double_buffer_index_,
+                CrossThreadUnretained(waitable_event.get())));
+
+        // Okay to block the offline audio rendering thread since it is
+        // not the actual audio device thread.
+        waitable_event->Wait();
+      }
+    }
+
+    // The main thread now works on the buffer we just filled while the audio
+    // thread moves on to the other one.
+    SwapBuffers();
+  }
+}
+
+// Main-thread half of the realtime path: posted from Process(), dispatches
+// the AudioProcessingEvent that runs the JS onaudioprocess handler.
+void ScriptProcessorHandler::FireProcessEvent(unsigned double_buffer_index) {
+  DCHECK(IsMainThread());
+
+  if (!Context() || !Context()->GetExecutionContext())
+    return;
+
+  DCHECK_LT(double_buffer_index, 2u);
+  if (double_buffer_index > 1)
+    return;
+
+  AudioBuffer* input_buffer = input_buffers_[double_buffer_index].Get();
+  AudioBuffer* output_buffer = output_buffers_[double_buffer_index].Get();
+  DCHECK(output_buffer);
+  if (!output_buffer)
+    return;
+
+  // Avoid firing the event if the document has already gone away.
+  if (GetNode()) {
+    // This synchronizes with process().
+    MutexLocker process_locker(process_event_lock_);
+
+    // Calculate a playbackTime with the buffersize which needs to be processed
+    // each time onaudioprocess is called. The outputBuffer being passed to JS
+    // will be played after exhuasting previous outputBuffer by
+    // double-buffering.
+    double playback_time = (Context()->CurrentSampleFrame() + buffer_size_) /
+                           static_cast<double>(Context()->sampleRate());
+
+    // Call the JavaScript event handler which will do the audio processing.
+    GetNode()->DispatchEvent(AudioProcessingEvent::Create(
+        input_buffer, output_buffer, playback_time));
+  }
+}
+
+// Main-thread half of the offline path.  The offline rendering thread blocks
+// on |waitable_event| in Process(), so this MUST signal it on every exit
+// path or rendering deadlocks.
+void ScriptProcessorHandler::FireProcessEventForOfflineAudioContext(
+    unsigned double_buffer_index,
+    WaitableEvent* waitable_event) {
+  DCHECK(IsMainThread());
+
+  if (!Context() || !Context()->GetExecutionContext())
+    return;
+
+  DCHECK_LT(double_buffer_index, 2u);
+  if (double_buffer_index > 1) {
+    waitable_event->Signal();
+    return;
+  }
+
+  AudioBuffer* input_buffer = input_buffers_[double_buffer_index].Get();
+  AudioBuffer* output_buffer = output_buffers_[double_buffer_index].Get();
+  DCHECK(output_buffer);
+  if (!output_buffer) {
+    waitable_event->Signal();
+    return;
+  }
+
+  if (GetNode()) {
+    // We do not need a process lock here because the offline render thread
+    // is locked by the waitable event.
+    double playback_time = (Context()->CurrentSampleFrame() + buffer_size_) /
+                           static_cast<double>(Context()->sampleRate());
+    GetNode()->DispatchEvent(AudioProcessingEvent::Create(
+        input_buffer, output_buffer, playback_time));
+  }
+
+  waitable_event->Signal();
+}
+
+bool ScriptProcessorHandler::RequiresTailProcessing() const {
+  // Always return true since the tail and latency are never zero.
+  return true;
+}
+
+// Infinite tail: the JS callback may produce output indefinitely, so the
+// node can never be considered silent by the tail-processing machinery.
+double ScriptProcessorHandler::TailTime() const {
+  return std::numeric_limits<double>::infinity();
+}
+
+// Infinite latency for the same reason; the double-buffering delay is not
+// representable as a finite, fixed value here.
+double ScriptProcessorHandler::LatencyTime() const {
+  return std::numeric_limits<double>::infinity();
+}
+
+// channelCount is fixed at construction time; any attempt to change it from
+// JS throws NotSupportedError.
+void ScriptProcessorHandler::SetChannelCount(unsigned long channel_count,
+                                             ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+  BaseAudioContext::GraphAutoLocker locker(Context());
+
+  if (channel_count != channel_count_) {
+    exception_state.ThrowDOMException(
+        kNotSupportedError, "channelCount cannot be changed from " +
+                                String::Number(channel_count_) + " to " +
+                                String::Number(channel_count));
+  }
+}
+
+// channelCountMode is locked to "explicit" (set in the constructor);
+// "max" and "clamped-max" are rejected with NotSupportedError.
+void ScriptProcessorHandler::SetChannelCountMode(
+    const String& mode,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+  BaseAudioContext::GraphAutoLocker locker(Context());
+
+  if ((mode == "max") || (mode == "clamped-max")) {
+    exception_state.ThrowDOMException(
+        kNotSupportedError,
+        "channelCountMode cannot be changed from 'explicit' to '" + mode + "'");
+  }
+}
+
+// ----------------------------------------------------------------
+
+// Thin wrapper node: all real work lives in ScriptProcessorHandler, which
+// this constructor creates and attaches.
+ScriptProcessorNode::ScriptProcessorNode(BaseAudioContext& context,
+                                         float sample_rate,
+                                         size_t buffer_size,
+                                         unsigned number_of_input_channels,
+                                         unsigned number_of_output_channels)
+    : AudioNode(context) {
+  SetHandler(ScriptProcessorHandler::Create(*this, sample_rate, buffer_size,
+                                            number_of_input_channels,
+                                            number_of_output_channels));
+}
+
+// Picks a default script-processor buffer size when the caller passes 0:
+// roughly 4x the hardware callback buffer size, rounded to the nearest
+// power of two, then clamped to the spec-valid range [256, 16384].
+static size_t ChooseBufferSize(size_t callback_buffer_size) {
+  // Choose a buffer size based on the audio hardware buffer size. Arbitarily
+  // make it a power of two that is 4 times greater than the hardware buffer
+  // size.
+  // FIXME: What is the best way to choose this?
+  // log2(x) + 0.5 rounds the exponent to the nearest integer, so the result
+  // is the power of two closest to 4 * callback_buffer_size.
+  size_t buffer_size =
+      1 << static_cast<unsigned>(log2(4 * callback_buffer_size) + 0.5);
+
+  if (buffer_size < 256)
+    return 256;
+  if (buffer_size > 16384)
+    return 16384;
+
+  return buffer_size;
+}
+
+// createScriptProcessor() with no arguments.
+ScriptProcessorNode* ScriptProcessorNode::Create(
+    BaseAudioContext& context,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  // Default buffer size is 0 (let WebAudio choose) with 2 inputs and 2
+  // outputs.
+  return Create(context, 0, 2, 2, exception_state);
+}
+
+// createScriptProcessor(bufferSize).
+ScriptProcessorNode* ScriptProcessorNode::Create(
+    BaseAudioContext& context,
+    size_t buffer_size,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  // Default is 2 inputs and 2 outputs.
+  return Create(context, buffer_size, 2, 2, exception_state);
+}
+
+// createScriptProcessor(bufferSize, numberOfInputChannels).
+ScriptProcessorNode* ScriptProcessorNode::Create(
+    BaseAudioContext& context,
+    size_t buffer_size,
+    unsigned number_of_input_channels,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  // Default is 2 outputs.
+  return Create(context, buffer_size, number_of_input_channels, 2,
+                exception_state);
+}
+
+// The fully-specified factory every other overload delegates to.  Validates
+// context state, channel counts, and buffer size per the Web Audio spec,
+// throwing IndexSizeError / the closed-context error on failure and
+// returning nullptr.
+ScriptProcessorNode* ScriptProcessorNode::Create(
+    BaseAudioContext& context,
+    size_t buffer_size,
+    unsigned number_of_input_channels,
+    unsigned number_of_output_channels,
+    ExceptionState& exception_state) {
+  DCHECK(IsMainThread());
+
+  if (context.IsContextClosed()) {
+    context.ThrowExceptionForClosedState(exception_state);
+    return nullptr;
+  }
+
+  // The node must have at least one input or one output channel.
+  if (number_of_input_channels == 0 && number_of_output_channels == 0) {
+    exception_state.ThrowDOMException(
+        kIndexSizeError,
+        "number of input channels and output channels cannot both be zero.");
+    return nullptr;
+  }
+
+  if (number_of_input_channels > BaseAudioContext::MaxNumberOfChannels()) {
+    exception_state.ThrowDOMException(
+        kIndexSizeError,
+        "number of input channels (" +
+            String::Number(number_of_input_channels) + ") exceeds maximum (" +
+            String::Number(BaseAudioContext::MaxNumberOfChannels()) + ").");
+    return nullptr;
+  }
+
+  if (number_of_output_channels > BaseAudioContext::MaxNumberOfChannels()) {
+    exception_state.ThrowDOMException(
+        kIndexSizeError,
+        "number of output channels (" +
+            String::Number(number_of_output_channels) + ") exceeds maximum (" +
+            String::Number(BaseAudioContext::MaxNumberOfChannels()) + ").");
+    return nullptr;
+  }
+
+  // Check for valid buffer size.
+  switch (buffer_size) {
+    case 0:
+      // Choose an appropriate size. For an AudioContext, we need to
+      // choose an appropriate size based on the callback buffer size.
+      // For OfflineAudioContext, there's no callback buffer size, so
+      // just use the minimum valid buffer size.
+      buffer_size =
+          context.HasRealtimeConstraint()
+              ? ChooseBufferSize(context.destination()->CallbackBufferSize())
+              : 256;
+      break;
+    case 256:
+    case 512:
+    case 1024:
+    case 2048:
+    case 4096:
+    case 8192:
+    case 16384:
+      break;
+    default:
+      exception_state.ThrowDOMException(
+          kIndexSizeError,
+          "buffer size (" + String::Number(buffer_size) +
+              ") must be 0 or a power of two between 256 and 16384.");
+      return nullptr;
+  }
+
+  ScriptProcessorNode* node = new ScriptProcessorNode(
+      context, context.sampleRate(), buffer_size, number_of_input_channels,
+      number_of_output_channels);
+
+  if (!node)
+    return nullptr;
+
+  // context keeps reference until we stop making javascript rendering callbacks
+  context.NotifySourceNodeStartedProcessing(node);
+
+  return node;
+}
+
+// IDL attribute getter; forwards to the handler's (possibly clamped) size.
+size_t ScriptProcessorNode::bufferSize() const {
+  return static_cast<ScriptProcessorHandler&>(Handler()).BufferSize();
+}
+
+// ActiveScriptWrappable hook: keeps the wrapper alive while an
+// onaudioprocess listener exists and the context is still open.
+bool ScriptProcessorNode::HasPendingActivity() const {
+  // To prevent the node from leaking after the context is closed.
+  if (context()->IsContextClosed())
+    return false;
+
+  // If |onaudioprocess| event handler is defined, the node should not be
+  // GCed even if it is out of scope.
+  if (HasEventListeners(EventTypeNames::audioprocess))
+    return true;
+
+  return false;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h
new file mode 100644
index 00000000000..79bcf99df2e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_SCRIPT_PROCESSOR_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_SCRIPT_PROCESSOR_NODE_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/bindings/core/v8/active_script_wrappable.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class AudioBuffer;
+class WaitableEvent;
+
+// ScriptProcessorNode is an AudioNode which allows for arbitrary synthesis or
+// processing directly using JavaScript. The API allows for a variable number
+// of inputs and outputs, although it must have at least one input or output.
+// This basic implementation supports no more than one input and output. The
+// "onaudioprocess" attribute is an event listener which will get called
+// periodically with an AudioProcessingEvent which has AudioBuffers for each
+// input and output.
+
+// Audio-thread handler for ScriptProcessorNode.  Double-buffers input and
+// output AudioBuffers shared with JavaScript and fires onaudioprocess on
+// the main thread when a buffer fills (see Process() in the .cc).
+class ScriptProcessorHandler final : public AudioHandler {
+ public:
+  static scoped_refptr<ScriptProcessorHandler> Create(
+      AudioNode&,
+      float sample_rate,
+      size_t buffer_size,
+      unsigned number_of_input_channels,
+      unsigned number_of_output_channels);
+  ~ScriptProcessorHandler() override;
+
+  // AudioHandler
+  void Process(size_t frames_to_process) override;
+  void Initialize() override;
+
+  // Buffer size in sample-frames; may have been clamped up to the render
+  // quantum by the constructor.
+  size_t BufferSize() const { return buffer_size_; }
+
+  // Both throw NotSupportedError on any attempted change: channel
+  // configuration is fixed at construction.
+  void SetChannelCount(unsigned long, ExceptionState&) override;
+  void SetChannelCountMode(const String&, ExceptionState&) override;
+
+  virtual unsigned NumberOfOutputChannels() const {
+    return number_of_output_channels_;
+  }
+
+ private:
+  ScriptProcessorHandler(AudioNode&,
+                         float sample_rate,
+                         size_t buffer_size,
+                         unsigned number_of_input_channels,
+                         unsigned number_of_output_channels);
+  double TailTime() const override;
+  double LatencyTime() const override;
+  bool RequiresTailProcessing() const final;
+
+  // Main-thread event dispatchers, posted from Process().  The offline
+  // variant signals |WaitableEvent| so the render thread can resume.
+  void FireProcessEvent(unsigned);
+  void FireProcessEventForOfflineAudioContext(unsigned, WaitableEvent*);
+
+  // Double buffering: the audio thread works on one index while the main
+  // thread (JS) works on the other; SwapBuffers() flips between 0 and 1.
+  unsigned DoubleBufferIndex() const { return double_buffer_index_; }
+  void SwapBuffers() { double_buffer_index_ = 1 - double_buffer_index_; }
+  unsigned double_buffer_index_;
+
+  // These Persistent don't make reference cycles including the owner
+  // ScriptProcessorNode.
+  PersistentHeapVector<Member<AudioBuffer>> input_buffers_;
+  PersistentHeapVector<Member<AudioBuffer>> output_buffers_;
+
+  size_t buffer_size_;
+  // Current write/read offset within the active double buffer; wraps to 0
+  // when a buffer is full, which triggers the process event.
+  unsigned buffer_read_write_index_;
+
+  unsigned number_of_input_channels_;
+  unsigned number_of_output_channels_;
+
+  // Non-owning view onto the current input AudioBuffer's channel memory.
+  scoped_refptr<AudioBus> internal_input_bus_;
+  // Synchronize process() with fireProcessEvent().
+  mutable Mutex process_event_lock_;
+
+  // Main-thread task runner for posting the process events.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  FRIEND_TEST_ALL_PREFIXES(ScriptProcessorNodeTest, BufferLifetime);
+};
+
+// The JS-facing node wrapper; see the header comment above.  Inherits
+// ActiveScriptWrappable so an attached onaudioprocess listener keeps the
+// wrapper alive (HasPendingActivity()).
+class ScriptProcessorNode final
+    : public AudioNode,
+      public ActiveScriptWrappable<ScriptProcessorNode> {
+  DEFINE_WRAPPERTYPEINFO();
+  USING_GARBAGE_COLLECTED_MIXIN(ScriptProcessorNode);
+
+ public:
+  // bufferSize must be one of the following values: 256, 512, 1024, 2048,
+  // 4096, 8192, 16384.
+  // This value controls how frequently the onaudioprocess event handler is
+  // called and how many sample-frames need to be processed each call.
+  // Lower numbers for bufferSize will result in a lower (better)
+  // latency. Higher numbers will be necessary to avoid audio breakup and
+  // glitches.
+  // The value chosen must carefully balance between latency and audio quality.
+  static ScriptProcessorNode* Create(BaseAudioContext&, ExceptionState&);
+  static ScriptProcessorNode* Create(BaseAudioContext&,
+                                     size_t buffer_size,
+                                     ExceptionState&);
+  static ScriptProcessorNode* Create(BaseAudioContext&,
+                                     size_t buffer_size,
+                                     unsigned number_of_input_channels,
+                                     ExceptionState&);
+  static ScriptProcessorNode* Create(BaseAudioContext&,
+                                     size_t buffer_size,
+                                     unsigned number_of_input_channels,
+                                     unsigned number_of_output_channels,
+                                     ExceptionState&);
+
+  DEFINE_ATTRIBUTE_EVENT_LISTENER(audioprocess);
+  size_t bufferSize() const;
+
+  // ScriptWrappable
+  bool HasPendingActivity() const final;
+
+  virtual void Trace(blink::Visitor* visitor) { AudioNode::Trace(visitor); }
+
+ private:
+  ScriptProcessorNode(BaseAudioContext&,
+                      float sample_rate,
+                      size_t buffer_size,
+                      unsigned number_of_input_channels,
+                      unsigned number_of_output_channels);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_SCRIPT_PROCESSOR_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.idl
new file mode 100644
index 00000000000..43b8b005338
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node.idl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#scriptprocessornode
+// For real-time audio stream synthesis/processing in JavaScript
+[
+ ActiveScriptWrappable
+] interface ScriptProcessorNode : AudioNode {
+ // Rendering callback
+ attribute EventHandler onaudioprocess;
+
+ readonly attribute long bufferSize;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node_test.cc
new file mode 100644
index 00000000000..4a086fd0545
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/script_processor_node_test.cc
@@ -0,0 +1,31 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/script_processor_node.h"
+
+namespace blink {
+
+TEST(ScriptProcessorNodeTest, BufferLifetime) {
+ std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
+ OfflineAudioContext* context = OfflineAudioContext::Create(
+ &page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
+ ScriptProcessorNode* node =
+ context->createScriptProcessor(ASSERT_NO_EXCEPTION);
+ ScriptProcessorHandler& handler =
+ static_cast<ScriptProcessorHandler&>(node->Handler());
+ EXPECT_EQ(2u, handler.input_buffers_.size());
+ EXPECT_EQ(2u, handler.output_buffers_.size());
+ BaseAudioContext::GraphAutoLocker locker(context);
+ handler.Dispose();
+ // Buffers should live after dispose() because an audio thread is using
+ // them.
+ EXPECT_EQ(2u, handler.input_buffers_.size());
+ EXPECT_EQ(2u, handler.output_buffers_.size());
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc
new file mode 100644
index 00000000000..1bc8b81c592
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.cc
@@ -0,0 +1,190 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/core/execution_context/execution_context.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node_output.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/stereo_panner_node.h"
+#include "third_party/blink/renderer/modules/webaudio/stereo_panner_options.h"
+#include "third_party/blink/renderer/platform/audio/stereo_panner.h"
+#include "third_party/blink/renderer/platform/wtf/math_extras.h"
+
+namespace blink {
+
+StereoPannerHandler::StereoPannerHandler(AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& pan)
+ : AudioHandler(kNodeTypeStereoPanner, node, sample_rate),
+ pan_(&pan),
+ sample_accurate_pan_values_(AudioUtilities::kRenderQuantumFrames) {
+ AddInput();
+ AddOutput(2);
+
+ // The node-specific default mixing rules declare that StereoPannerNode
+ // can handle mono to stereo and stereo to stereo conversion.
+ channel_count_ = 2;
+ SetInternalChannelCountMode(kClampedMax);
+ SetInternalChannelInterpretation(AudioBus::kSpeakers);
+
+ Initialize();
+}
+
+scoped_refptr<StereoPannerHandler> StereoPannerHandler::Create(
+ AudioNode& node,
+ float sample_rate,
+ AudioParamHandler& pan) {
+ return base::AdoptRef(new StereoPannerHandler(node, sample_rate, pan));
+}
+
+StereoPannerHandler::~StereoPannerHandler() {
+ Uninitialize();
+}
+
+void StereoPannerHandler::Process(size_t frames_to_process) {
+ AudioBus* output_bus = Output(0).Bus();
+
+ if (!IsInitialized() || !Input(0).IsConnected() || !stereo_panner_.get()) {
+ output_bus->Zero();
+ return;
+ }
+
+ AudioBus* input_bus = Input(0).Bus();
+ if (!input_bus) {
+ output_bus->Zero();
+ return;
+ }
+
+ if (pan_->HasSampleAccurateValues()) {
+ // Apply sample-accurate panning specified by AudioParam automation.
+ DCHECK_LE(frames_to_process, sample_accurate_pan_values_.size());
+ if (frames_to_process <= sample_accurate_pan_values_.size()) {
+ float* pan_values = sample_accurate_pan_values_.Data();
+ pan_->CalculateSampleAccurateValues(pan_values, frames_to_process);
+ stereo_panner_->PanWithSampleAccurateValues(
+ input_bus, output_bus, pan_values, frames_to_process);
+ }
+ } else {
+ stereo_panner_->PanToTargetValue(input_bus, output_bus, pan_->Value(),
+ frames_to_process);
+ }
+}
+
+void StereoPannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
+ float values[AudioUtilities::kRenderQuantumFrames];
+ DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
+
+ pan_->CalculateSampleAccurateValues(values, frames_to_process);
+}
+
+void StereoPannerHandler::Initialize() {
+ if (IsInitialized())
+ return;
+
+ stereo_panner_ = StereoPanner::Create(Context()->sampleRate());
+
+ AudioHandler::Initialize();
+}
+
+void StereoPannerHandler::SetChannelCount(unsigned long channel_count,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ // A PannerNode only supports 1 or 2 channels
+ if (channel_count > 0 && channel_count <= 2) {
+ if (channel_count_ != channel_count) {
+ channel_count_ = channel_count;
+ if (InternalChannelCountMode() != kMax)
+ UpdateChannelsForInputs();
+ }
+ } else {
+ exception_state.ThrowDOMException(
+ kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned long>(
+ "channelCount", channel_count, 1,
+ ExceptionMessages::kInclusiveBound, 2,
+ ExceptionMessages::kInclusiveBound));
+ }
+}
+
+void StereoPannerHandler::SetChannelCountMode(const String& mode,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+ BaseAudioContext::GraphAutoLocker locker(Context());
+
+ ChannelCountMode old_mode = InternalChannelCountMode();
+
+ if (mode == "clamped-max") {
+ new_channel_count_mode_ = kClampedMax;
+ } else if (mode == "explicit") {
+ new_channel_count_mode_ = kExplicit;
+ } else if (mode == "max") {
+ // This is not supported for a StereoPannerNode, which can only handle
+ // 1 or 2 channels.
+ exception_state.ThrowDOMException(kNotSupportedError,
+ "StereoPanner: 'max' is not allowed");
+ new_channel_count_mode_ = old_mode;
+ } else {
+ // Do nothing for other invalid values.
+ new_channel_count_mode_ = old_mode;
+ }
+
+ if (new_channel_count_mode_ != old_mode)
+ Context()->GetDeferredTaskHandler().AddChangedChannelCountMode(this);
+}
+
+// ----------------------------------------------------------------
+
+StereoPannerNode::StereoPannerNode(BaseAudioContext& context)
+ : AudioNode(context),
+ pan_(AudioParam::Create(context,
+ kParamTypeStereoPannerPan,
+ "StereoPanner.pan",
+ 0,
+ -1,
+ 1)) {
+ SetHandler(StereoPannerHandler::Create(*this, context.sampleRate(),
+ pan_->Handler()));
+}
+
+StereoPannerNode* StereoPannerNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new StereoPannerNode(context);
+}
+
+StereoPannerNode* StereoPannerNode::Create(BaseAudioContext* context,
+ const StereoPannerOptions& options,
+ ExceptionState& exception_state) {
+ StereoPannerNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ node->pan()->setValue(options.pan());
+
+ return node;
+}
+
+void StereoPannerNode::Trace(blink::Visitor* visitor) {
+ visitor->Trace(pan_);
+ AudioNode::Trace(visitor);
+}
+
+AudioParam* StereoPannerNode::pan() const {
+ return pan_;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h
new file mode 100644
index 00000000000..34e5decfd95
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.h
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_STEREO_PANNER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_STEREO_PANNER_NODE_H_
+
+#include <memory>
+#include "base/gtest_prod_util.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_param.h"
+#include "third_party/blink/renderer/platform/audio/audio_bus.h"
+#include "third_party/blink/renderer/platform/audio/stereo_panner.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class StereoPannerOptions;
+
+// StereoPannerNode is an AudioNode with one input and one output. It is
+// specifically designed for equal-power stereo panning.
+class StereoPannerHandler final : public AudioHandler {
+ public:
+ static scoped_refptr<StereoPannerHandler> Create(AudioNode&,
+ float sample_rate,
+ AudioParamHandler& pan);
+ ~StereoPannerHandler() override;
+
+ void Process(size_t frames_to_process) override;
+ void ProcessOnlyAudioParams(size_t frames_to_process) override;
+ void Initialize() override;
+
+ void SetChannelCount(unsigned long, ExceptionState&) final;
+ void SetChannelCountMode(const String&, ExceptionState&) final;
+
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override { return 0; }
+ bool RequiresTailProcessing() const final { return false; }
+
+ private:
+ StereoPannerHandler(AudioNode&, float sample_rate, AudioParamHandler& pan);
+
+ std::unique_ptr<StereoPanner> stereo_panner_;
+ scoped_refptr<AudioParamHandler> pan_;
+
+ AudioFloatArray sample_accurate_pan_values_;
+
+ FRIEND_TEST_ALL_PREFIXES(StereoPannerNodeTest, StereoPannerLifetime);
+};
+
+class StereoPannerNode final : public AudioNode {
+ DEFINE_WRAPPERTYPEINFO();
+
+ public:
+ static StereoPannerNode* Create(BaseAudioContext&, ExceptionState&);
+ static StereoPannerNode* Create(BaseAudioContext*,
+ const StereoPannerOptions&,
+ ExceptionState&);
+ virtual void Trace(blink::Visitor*);
+
+ AudioParam* pan() const;
+
+ private:
+ StereoPannerNode(BaseAudioContext&);
+
+ Member<AudioParam> pan_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_STEREO_PANNER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.idl
new file mode 100644
index 00000000000..2753d919ae4
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node.idl
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#stereopannernode
+[
+ Constructor(BaseAudioContext context, optional StereoPannerOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface StereoPannerNode : AudioNode {
+
+ readonly attribute AudioParam pan;
+
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node_test.cc b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node_test.cc
new file mode 100644
index 00000000000..36f180423ee
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_node_test.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
+#include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/stereo_panner_node.h"
+
+namespace blink {
+
+TEST(StereoPannerNodeTest, StereoPannerLifetime) {
+ std::unique_ptr<DummyPageHolder> page = DummyPageHolder::Create();
+ OfflineAudioContext* context = OfflineAudioContext::Create(
+ &page->GetDocument(), 2, 1, 48000, ASSERT_NO_EXCEPTION);
+ StereoPannerNode* node = context->createStereoPanner(ASSERT_NO_EXCEPTION);
+ StereoPannerHandler& handler =
+ static_cast<StereoPannerHandler&>(node->Handler());
+ EXPECT_TRUE(handler.stereo_panner_);
+ BaseAudioContext::GraphAutoLocker locker(context);
+ handler.Dispose();
+ // m_stereoPanner should live after dispose() because an audio thread is
+ // using it.
+ EXPECT_TRUE(handler.stereo_panner_);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_options.idl
new file mode 100644
index 00000000000..6e597f99f2c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/stereo_panner_options.idl
@@ -0,0 +1,8 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-stereopanneroptions
+dictionary StereoPannerOptions : AudioNodeOptions {
+ float pan = 0;
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.cc b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.cc
new file mode 100644
index 00000000000..f45f5db831b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.cc
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.h"
+
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/instance_counters.h"
+
+namespace blink {
+
+unsigned InternalsWebAudio::audioHandlerCount(Internals& internals) {
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(
+ stderr, "InternalsWebAudio::audioHandlerCount = %u\n",
+ InstanceCounters::CounterValue(InstanceCounters::kAudioHandlerCounter));
+#endif
+ return InstanceCounters::CounterValue(InstanceCounters::kAudioHandlerCounter);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.h b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.h
new file mode 100644
index 00000000000..f9af7d01a1a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_
+
+#include "third_party/blink/renderer/platform/wtf/allocator.h"
+
+namespace blink {
+
+class Internals;
+
+class InternalsWebAudio {
+ STATIC_ONLY(InternalsWebAudio);
+
+ public:
+ static unsigned audioHandlerCount(Internals&);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_TESTING_INTERNALS_WEB_AUDIO_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.idl b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.idl
new file mode 100644
index 00000000000..9ee6fc0f888
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/testing/internals_web_audio.idl
@@ -0,0 +1,9 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[
+ ImplementedAs=InternalsWebAudio
+] partial interface Internals {
+ unsigned long audioHandlerCount();
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc
new file mode 100644
index 00000000000..6ffee6b427c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "third_party/blink/renderer/platform/audio/audio_utilities.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+
+WaveShaperDSPKernel::WaveShaperDSPKernel(WaveShaperProcessor* processor)
+ : AudioDSPKernel(processor) {
+ if (processor->Oversample() != WaveShaperProcessor::kOverSampleNone)
+ LazyInitializeOversampling();
+}
+
+void WaveShaperDSPKernel::LazyInitializeOversampling() {
+ if (!temp_buffer_) {
+ temp_buffer_ = std::make_unique<AudioFloatArray>(
+ AudioUtilities::kRenderQuantumFrames * 2);
+ temp_buffer2_ = std::make_unique<AudioFloatArray>(
+ AudioUtilities::kRenderQuantumFrames * 4);
+ up_sampler_ =
+ std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames);
+ down_sampler_ =
+ std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 2);
+ up_sampler2_ =
+ std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames * 2);
+ down_sampler2_ =
+ std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 4);
+ }
+}
+
+void WaveShaperDSPKernel::Process(const float* source,
+ float* destination,
+ size_t frames_to_process) {
+ switch (GetWaveShaperProcessor()->Oversample()) {
+ case WaveShaperProcessor::kOverSampleNone:
+ ProcessCurve(source, destination, frames_to_process);
+ break;
+ case WaveShaperProcessor::kOverSample2x:
+ ProcessCurve2x(source, destination, frames_to_process);
+ break;
+ case WaveShaperProcessor::kOverSample4x:
+ ProcessCurve4x(source, destination, frames_to_process);
+ break;
+
+ default:
+ NOTREACHED();
+ }
+}
+
+void WaveShaperDSPKernel::ProcessCurve(const float* source,
+ float* destination,
+ size_t frames_to_process) {
+ DCHECK(source);
+ DCHECK(destination);
+ DCHECK(GetWaveShaperProcessor());
+
+ Vector<float>* curve = GetWaveShaperProcessor()->Curve();
+ if (!curve) {
+ // Act as "straight wire" pass-through if no curve is set.
+ memcpy(destination, source, sizeof(float) * frames_to_process);
+ return;
+ }
+
+ float* curve_data = curve->data();
+ int curve_length = curve->size();
+
+ DCHECK(curve_data);
+
+ if (!curve_data || !curve_length) {
+ memcpy(destination, source, sizeof(float) * frames_to_process);
+ return;
+ }
+
+ // Apply waveshaping curve.
+ for (unsigned i = 0; i < frames_to_process; ++i) {
+ const float input = source[i];
+
+ // Calculate a virtual index based on input -1 -> +1 with -1 being curve[0],
+ // +1 being curve[curveLength - 1], and 0 being at the center of the curve
+ // data. Then linearly interpolate between the two points in the curve.
+ double virtual_index = 0.5 * (input + 1) * (curve_length - 1);
+ double output;
+
+ if (virtual_index < 0) {
+ // input < -1, so use curve[0]
+ output = curve_data[0];
+ } else if (virtual_index >= curve_length - 1) {
+ // input >= 1, so use last curve value
+ output = curve_data[curve_length - 1];
+ } else {
+ // The general case where -1 <= input < 1, where 0 <= virtualIndex <
+ // curveLength - 1, so interpolate between the nearest samples on the
+ // curve.
+ unsigned index1 = static_cast<unsigned>(virtual_index);
+ unsigned index2 = index1 + 1;
+ double interpolation_factor = virtual_index - index1;
+
+ double value1 = curve_data[index1];
+ double value2 = curve_data[index2];
+
+ output =
+ (1.0 - interpolation_factor) * value1 + interpolation_factor * value2;
+ }
+ destination[i] = output;
+ }
+}
+
+void WaveShaperDSPKernel::ProcessCurve2x(const float* source,
+ float* destination,
+ size_t frames_to_process) {
+ bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
+ DCHECK(is_safe);
+ if (!is_safe)
+ return;
+
+ float* temp_p = temp_buffer_->Data();
+
+ up_sampler_->Process(source, temp_p, frames_to_process);
+
+ // Process at 2x up-sampled rate.
+ ProcessCurve(temp_p, temp_p, frames_to_process * 2);
+
+ down_sampler_->Process(temp_p, destination, frames_to_process * 2);
+}
+
+void WaveShaperDSPKernel::ProcessCurve4x(const float* source,
+ float* destination,
+ size_t frames_to_process) {
+ bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
+ DCHECK(is_safe);
+ if (!is_safe)
+ return;
+
+ float* temp_p = temp_buffer_->Data();
+ float* temp_p2 = temp_buffer2_->Data();
+
+ up_sampler_->Process(source, temp_p, frames_to_process);
+ up_sampler2_->Process(temp_p, temp_p2, frames_to_process * 2);
+
+ // Process at 4x up-sampled rate.
+ ProcessCurve(temp_p2, temp_p2, frames_to_process * 4);
+
+ down_sampler2_->Process(temp_p2, temp_p, frames_to_process * 4);
+ down_sampler_->Process(temp_p, destination, frames_to_process * 2);
+}
+
+void WaveShaperDSPKernel::Reset() {
+ if (up_sampler_) {
+ up_sampler_->Reset();
+ down_sampler_->Reset();
+ up_sampler2_->Reset();
+ down_sampler2_->Reset();
+ }
+}
+
+bool WaveShaperDSPKernel::RequiresTailProcessing() const {
+ // Always return true even if the tail time and latency might both be zero.
+ return true;
+}
+
+double WaveShaperDSPKernel::LatencyTime() const {
+ size_t latency_frames = 0;
+ WaveShaperDSPKernel* kernel = const_cast<WaveShaperDSPKernel*>(this);
+
+ switch (kernel->GetWaveShaperProcessor()->Oversample()) {
+ case WaveShaperProcessor::kOverSampleNone:
+ break;
+ case WaveShaperProcessor::kOverSample2x:
+ latency_frames += up_sampler_->LatencyFrames();
+ latency_frames += down_sampler_->LatencyFrames();
+ break;
+ case WaveShaperProcessor::kOverSample4x: {
+ // Account for first stage upsampling.
+ latency_frames += up_sampler_->LatencyFrames();
+ latency_frames += down_sampler_->LatencyFrames();
+
+ // Account for second stage upsampling.
+ // and divide by 2 to get back down to the regular sample-rate.
+ size_t latency_frames2 =
+ (up_sampler2_->LatencyFrames() + down_sampler2_->LatencyFrames()) / 2;
+ latency_frames += latency_frames2;
+ break;
+ }
+ default:
+ NOTREACHED();
+ }
+
+ return static_cast<double>(latency_frames) / SampleRate();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h
new file mode 100644
index 00000000000..b3c308aedfc
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_DSP_KERNEL_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_DSP_KERNEL_H_
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h"
+#include "third_party/blink/renderer/platform/audio/audio_array.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/down_sampler.h"
+#include "third_party/blink/renderer/platform/audio/up_sampler.h"
+
+namespace blink {
+
+class WaveShaperProcessor;
+
+// WaveShaperDSPKernel is an AudioDSPKernel and is responsible for non-linear
+// distortion on one channel.
+
+class WaveShaperDSPKernel final : public AudioDSPKernel {
+ public:
+ explicit WaveShaperDSPKernel(WaveShaperProcessor*);
+
+ // AudioDSPKernel
+ void Process(const float* source,
+ float* dest,
+ size_t frames_to_process) override;
+ void Reset() override;
+ double TailTime() const override { return 0; }
+ double LatencyTime() const override;
+ bool RequiresTailProcessing() const final;
+
+ // Oversampling requires more resources, so let's only allocate them if
+ // needed.
+ void LazyInitializeOversampling();
+
+ protected:
+ // Apply the shaping curve.
+ void ProcessCurve(const float* source, float* dest, size_t frames_to_process);
+
+ // Use up-sampling, process at the higher sample-rate, then down-sample.
+ void ProcessCurve2x(const float* source,
+ float* dest,
+ size_t frames_to_process);
+ void ProcessCurve4x(const float* source,
+ float* dest,
+ size_t frames_to_process);
+
+ WaveShaperProcessor* GetWaveShaperProcessor() {
+ return static_cast<WaveShaperProcessor*>(Processor());
+ }
+
+ // Oversampling.
+ std::unique_ptr<AudioFloatArray> temp_buffer_;
+ std::unique_ptr<AudioFloatArray> temp_buffer2_;
+ std::unique_ptr<UpSampler> up_sampler_;
+ std::unique_ptr<DownSampler> down_sampler_;
+ std::unique_ptr<UpSampler> up_sampler2_;
+ std::unique_ptr<DownSampler> down_sampler2_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_DSP_KERNEL_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc
new file mode 100644
index 00000000000..eb0ff5552bf
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_node.h"
+
+#include <memory>
+
+#include "third_party/blink/renderer/bindings/core/v8/exception_messages.h"
+#include "third_party/blink/renderer/bindings/core/v8/exception_state.h"
+#include "third_party/blink/renderer/core/dom/exception_code.h"
+#include "third_party/blink/renderer/modules/webaudio/base_audio_context.h"
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_options.h"
+
+namespace blink {
+
+WaveShaperHandler::WaveShaperHandler(AudioNode& node, float sample_rate)
+ : AudioBasicProcessorHandler(
+ kNodeTypeWaveShaper,
+ node,
+ sample_rate,
+ std::make_unique<WaveShaperProcessor>(sample_rate, 1)) {
+ Initialize();
+}
+
+scoped_refptr<WaveShaperHandler> WaveShaperHandler::Create(AudioNode& node,
+ float sample_rate) {
+ return base::AdoptRef(new WaveShaperHandler(node, sample_rate));
+}
+
+WaveShaperNode::WaveShaperNode(BaseAudioContext& context) : AudioNode(context) {
+ SetHandler(WaveShaperHandler::Create(*this, context.sampleRate()));
+}
+
+WaveShaperNode* WaveShaperNode::Create(BaseAudioContext& context,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (context.IsContextClosed()) {
+ context.ThrowExceptionForClosedState(exception_state);
+ return nullptr;
+ }
+
+ return new WaveShaperNode(context);
+}
+
+WaveShaperNode* WaveShaperNode::Create(BaseAudioContext* context,
+ const WaveShaperOptions& options,
+ ExceptionState& exception_state) {
+ WaveShaperNode* node = Create(*context, exception_state);
+
+ if (!node)
+ return nullptr;
+
+ node->HandleChannelOptions(options, exception_state);
+
+ if (options.hasCurve())
+ node->setCurve(options.curve(), exception_state);
+
+ node->setOversample(options.oversample());
+
+ return node;
+}
+WaveShaperProcessor* WaveShaperNode::GetWaveShaperProcessor() const {
+ return static_cast<WaveShaperProcessor*>(
+ static_cast<WaveShaperHandler&>(Handler()).Processor());
+}
+
+void WaveShaperNode::SetCurveImpl(const float* curve_data,
+ unsigned curve_length,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (curve_data && curve_length < 2) {
+ exception_state.ThrowDOMException(
+ kInvalidAccessError,
+ ExceptionMessages::IndexExceedsMinimumBound<unsigned>("curve length",
+ curve_length, 2));
+ return;
+ }
+
+ GetWaveShaperProcessor()->SetCurve(curve_data, curve_length);
+}
+
+void WaveShaperNode::setCurve(NotShared<DOMFloat32Array> curve,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ if (curve)
+ SetCurveImpl(curve.View()->Data(), curve.View()->length(), exception_state);
+ else
+ SetCurveImpl(nullptr, 0, exception_state);
+}
+
+void WaveShaperNode::setCurve(const Vector<float>& curve,
+ ExceptionState& exception_state) {
+ DCHECK(IsMainThread());
+
+ SetCurveImpl(curve.data(), curve.size(), exception_state);
+}
+
+NotShared<DOMFloat32Array> WaveShaperNode::curve() {
+ Vector<float>* curve = GetWaveShaperProcessor()->Curve();
+ if (!curve)
+ return NotShared<DOMFloat32Array>(nullptr);
+
+ unsigned size = curve->size();
+ scoped_refptr<WTF::Float32Array> new_curve = WTF::Float32Array::Create(size);
+
+ memcpy(new_curve->Data(), curve->data(), sizeof(float) * size);
+
+ return NotShared<DOMFloat32Array>(
+ DOMFloat32Array::Create(std::move(new_curve)));
+}
+
+void WaveShaperNode::setOversample(const String& type) {
+ DCHECK(IsMainThread());
+
+ // This is to synchronize with the changes made in
+ // AudioBasicProcessorNode::checkNumberOfChannelsForInput() where we can
+ // initialize() and uninitialize().
+ BaseAudioContext::GraphAutoLocker context_locker(context());
+
+ if (type == "none") {
+ GetWaveShaperProcessor()->SetOversample(
+ WaveShaperProcessor::kOverSampleNone);
+ } else if (type == "2x") {
+ GetWaveShaperProcessor()->SetOversample(WaveShaperProcessor::kOverSample2x);
+ } else if (type == "4x") {
+ GetWaveShaperProcessor()->SetOversample(WaveShaperProcessor::kOverSample4x);
+ } else {
+ NOTREACHED();
+ }
+}
+
+String WaveShaperNode::oversample() const {
+ switch (const_cast<WaveShaperNode*>(this)
+ ->GetWaveShaperProcessor()
+ ->Oversample()) {
+ case WaveShaperProcessor::kOverSampleNone:
+ return "none";
+ case WaveShaperProcessor::kOverSample2x:
+ return "2x";
+ case WaveShaperProcessor::kOverSample4x:
+ return "4x";
+ default:
+ NOTREACHED();
+ return "none";
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h
new file mode 100644
index 00000000000..6f51f121dc9
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_NODE_H_
+
+#include "third_party/blink/renderer/core/typed_arrays/array_buffer_view_helpers.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_basic_processor_handler.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h"
+
+namespace blink {
+
+class BaseAudioContext;
+class ExceptionState;
+class WaveShaperOptions;
+
+// Thin AudioBasicProcessorHandler subclass whose processor is the
+// WaveShaperProcessor backing a WaveShaperNode.
+class WaveShaperHandler : public AudioBasicProcessorHandler {
+ public:
+  static scoped_refptr<WaveShaperHandler> Create(AudioNode&, float sample_rate);
+
+ private:
+  // |node| is the owning WaveShaperNode. (The parameter was previously
+  // named |iirfilter_node|, a copy-paste leftover from IIRFilterNode.)
+  WaveShaperHandler(AudioNode& node, float sample_rate);
+};
+
+// Implements the Web Audio API WaveShaperNode interface: applies a
+// non-linear shaping curve to the signal, optionally with oversampling.
+class WaveShaperNode final : public AudioNode {
+  DEFINE_WRAPPERTYPEINFO();
+
+ public:
+  static WaveShaperNode* Create(BaseAudioContext&, ExceptionState&);
+  static WaveShaperNode* Create(BaseAudioContext*,
+                                const WaveShaperOptions&,
+                                ExceptionState&);
+
+  // setCurve() is called on the main thread.
+  void setCurve(NotShared<DOMFloat32Array>, ExceptionState&);
+  void setCurve(const Vector<float>&, ExceptionState&);
+  // Returns a copy of the current curve, or null when none has been set.
+  NotShared<DOMFloat32Array> curve();
+
+  // |oversample| is one of the IDL OverSampleType strings:
+  // "none", "2x" or "4x".
+  void setOversample(const String&);
+  String oversample() const;
+
+ private:
+  explicit WaveShaperNode(BaseAudioContext&);
+
+  // Shared implementation behind both setCurve() overloads.
+  void SetCurveImpl(const float* curve_data,
+                    unsigned curve_length,
+                    ExceptionState&);
+  WaveShaperProcessor* GetWaveShaperProcessor() const;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_NODE_H_
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.idl b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.idl
new file mode 100644
index 00000000000..bfecb7e3ff2
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_node.idl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+// See https://webaudio.github.io/web-audio-api/#waveshapernode
+enum OverSampleType {
+ "none",
+ "2x",
+ "4x"
+};
+
+[
+ Constructor(BaseAudioContext context, optional WaveShaperOptions options),
+ RaisesException=Constructor,
+ Measure
+]
+interface WaveShaperNode : AudioNode {
+ [RaisesException=Setter] attribute Float32Array? curve;
+ attribute OverSampleType oversample;
+};
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_options.idl b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_options.idl
new file mode 100644
index 00000000000..2a0ec145517
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_options.idl
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://webaudio.github.io/web-audio-api/#dictdef-waveshaperoptions
+dictionary WaveShaperOptions : AudioNodeOptions {
+  // Optional initial shaping curve.
+  sequence<float> curve;
+  // Initial oversampling mode.
+  OverSampleType oversample = "none";
+}; \ No newline at end of file
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc
new file mode 100644
index 00000000000..ab5bfa13cdb
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <memory>
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_dsp_kernel.h"
+#include "third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h"
+
+namespace blink {
+
+// Constructs a processor with no curve set and oversampling disabled.
+WaveShaperProcessor::WaveShaperProcessor(float sample_rate,
+                                         size_t number_of_channels)
+    : AudioDSPKernelProcessor(sample_rate, number_of_channels),
+      oversample_(kOverSampleNone) {}
+
+WaveShaperProcessor::~WaveShaperProcessor() {
+  // Release processor resources if initialization ever happened.
+  if (IsInitialized())
+    Uninitialize();
+}
+
+// Factory for per-channel DSP kernels; Process() expects one kernel per
+// channel (see the channel-count DCHECK there).
+std::unique_ptr<AudioDSPKernel> WaveShaperProcessor::CreateKernel() {
+  return std::make_unique<WaveShaperDSPKernel>(this);
+}
+
+// Replaces the shaping curve with a copy of |curve_data|. A null or empty
+// input clears the curve. Main-thread only; the mutex serializes against
+// Process() running on the audio thread.
+void WaveShaperProcessor::SetCurve(const float* curve_data,
+                                   unsigned curve_length) {
+  DCHECK(IsMainThread());
+
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+
+  if (curve_length == 0 || !curve_data) {
+    curve_ = nullptr;
+    return;
+  }
+
+  // Copy the curve data, if any, to our internal buffer.
+  curve_ = std::make_unique<Vector<float>>(curve_length);
+  memcpy(curve_->data(), curve_data, sizeof(float) * curve_length);
+}
+
+// Updates the oversampling mode under the process lock so the audio thread
+// never observes a half-applied change.
+void WaveShaperProcessor::SetOversample(OverSampleType oversample) {
+  // This synchronizes with process().
+  MutexLocker process_locker(process_lock_);
+
+  oversample_ = oversample;
+
+  if (oversample == kOverSampleNone)
+    return;
+
+  // Make sure each kernel has its oversampling machinery set up before the
+  // audio thread starts relying on it.
+  for (auto& kernel : kernels_) {
+    static_cast<WaveShaperDSPKernel*>(kernel.get())
+        ->LazyInitializeOversampling();
+  }
+}
+
+// Audio-thread entry point: shapes |source| into |destination| one channel
+// at a time. Outputs silence when uninitialized or when the curve is being
+// mutated concurrently (try-lock failure), rather than blocking.
+void WaveShaperProcessor::Process(const AudioBus* source,
+                                  AudioBus* destination,
+                                  size_t frames_to_process) {
+  if (!IsInitialized()) {
+    destination->Zero();
+    return;
+  }
+
+  // One kernel per channel, and the bus topologies must agree.
+  bool channel_count_matches =
+      source->NumberOfChannels() == destination->NumberOfChannels() &&
+      source->NumberOfChannels() == kernels_.size();
+  DCHECK(channel_count_matches);
+  if (!channel_count_matches)
+    return;
+
+  // The audio thread can't block on this lock, so we call tryLock() instead.
+  MutexTryLocker try_locker(process_lock_);
+  if (try_locker.Locked()) {
+    // For each channel of our input, process using the corresponding
+    // WaveShaperDSPKernel into the output channel.
+    for (unsigned i = 0; i < kernels_.size(); ++i)
+      kernels_[i]->Process(source->Channel(i)->Data(),
+                           destination->Channel(i)->MutableData(),
+                           frames_to_process);
+  } else {
+    // Too bad - the tryLock() failed. We must be in the middle of a setCurve()
+    // call.
+    destination->Zero();
+  }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h
new file mode 100644
index 00000000000..8c5ba1099af
--- /dev/null
+++ b/chromium/third_party/blink/renderer/modules/webaudio/wave_shaper_processor.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_PROCESSOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_PROCESSOR_H_
+
+#include <memory>
+#include "base/memory/scoped_refptr.h"
+#include "third_party/blink/renderer/core/typed_arrays/dom_typed_array.h"
+#include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel.h"
+#include "third_party/blink/renderer/platform/audio/audio_dsp_kernel_processor.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+// WaveShaperProcessor is an AudioDSPKernelProcessor which uses
+// WaveShaperDSPKernel objects to implement non-linear distortion effects.
+
+class WaveShaperProcessor final : public AudioDSPKernelProcessor {
+ public:
+  enum OverSampleType { kOverSampleNone, kOverSample2x, kOverSample4x };
+
+  WaveShaperProcessor(float sample_rate, size_t number_of_channels);
+
+  ~WaveShaperProcessor() override;
+
+  std::unique_ptr<AudioDSPKernel> CreateKernel() override;
+
+  void Process(const AudioBus* source,
+               AudioBus* destination,
+               size_t frames_to_process) override;
+
+  // Copies |curve_data| into the internal buffer; a null/empty curve clears
+  // it. Main-thread only, serialized against Process().
+  void SetCurve(const float* curve_data, unsigned curve_length);
+  Vector<float>* Curve() const { return curve_.get(); }
+
+  void SetOversample(OverSampleType);
+  OverSampleType Oversample() const { return oversample_; }
+
+ private:
+  // curve_ represents the non-linear shaping curve; null when unset.
+  // (Comment previously referred to the pre-rename member name "m_curve".)
+  std::unique_ptr<Vector<float>> curve_;
+
+  OverSampleType oversample_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBAUDIO_WAVE_SHAPER_PROCESSOR_H_