commit 2a19c63448c84c1805fb1a585c3651318bb86ca7 (patch)
Author:    Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 15:28:34 +0200
Committer: Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 13:54:51 +0000
Tree:      eb17888e8531aa6ee5e85721bd553b832a7e5156 /chromium/base
Parent:    b014812705fc80bff0a5c120dfcef88f349816dc (diff)
Download:  qtwebengine-chromium-2a19c63448c84c1805fb1a585c3651318bb86ca7.tar.gz
BASELINE: Update Chromium to 69.0.3497.70
Change-Id: I2b7b56e4e7a8b26656930def0d4575dc32b900a0
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
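Note for readers working from a local checkout: assuming a clone of the qtwebengine-chromium mirror that contains this commit, the path-limited diffstat below can be regenerated with a plain git invocation such as:

    git show --stat=250 2a19c63448c84c1805fb1a585c3651318bb86ca7 -- chromium/base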
Diffstat (limited to 'chromium/base')
-rw-r--r-- chromium/base/BUILD.gn | 168
-rw-r--r-- chromium/base/OWNERS | 2
-rw-r--r-- chromium/base/allocator/BUILD.gn | 8
-rw-r--r-- chromium/base/allocator/allocator_extension.cc | 6
-rw-r--r-- chromium/base/allocator/allocator_interception_mac.mm | 2
-rw-r--r-- chromium/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc | 4
-rw-r--r-- chromium/base/allocator/allocator_shim_override_cpp_symbols.h | 8
-rw-r--r-- chromium/base/allocator/debugallocation_shim.cc | 4
-rw-r--r-- chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc | 5
-rw-r--r-- chromium/base/allocator/partition_allocator/page_allocator_unittest.cc | 8
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 86
-rw-r--r-- chromium/base/android/jni_generator/BUILD.gn | 1
-rw-r--r-- chromium/base/android/jni_generator/jni_exception_list.gni | 9
-rw-r--r-- chromium/base/android/linker/BUILD.gn | 4
-rw-r--r-- chromium/base/android/orderfile/BUILD.gn | 34
-rw-r--r-- chromium/base/base_paths_fuchsia.cc | 19
-rw-r--r-- chromium/base/base_switches.cc | 6
-rw-r--r-- chromium/base/base_switches.h | 2
-rw-r--r-- chromium/base/bind.h | 25
-rw-r--r-- chromium/base/bind_internal.h | 65
-rw-r--r-- chromium/base/callback.h | 8
-rw-r--r-- chromium/base/callback_internal.cc | 12
-rw-r--r-- chromium/base/callback_internal.h | 24
-rw-r--r-- chromium/base/containers/queue.h | 2
-rw-r--r-- chromium/base/containers/ring_buffer.h | 28
-rw-r--r-- chromium/base/containers/stack.h | 2
-rw-r--r-- chromium/base/debug/activity_tracker.h | 7
-rw-r--r-- chromium/base/debug/profiler.cc | 2
-rw-r--r-- chromium/base/debug/stack_trace_unittest.cc | 4
-rw-r--r-- chromium/base/debug/task_annotator.cc | 14
-rw-r--r-- chromium/base/debug/task_annotator.h | 12
-rw-r--r-- chromium/base/debug/task_annotator_unittest.cc | 2
-rw-r--r-- chromium/base/environment_unittest.cc | 6
-rw-r--r-- chromium/base/files/file.cc | 15
-rw-r--r-- chromium/base/files/file.h | 32
-rw-r--r-- chromium/base/files/file_path_watcher_unittest.cc | 52
-rw-r--r-- chromium/base/files/file_posix.cc | 7
-rw-r--r-- chromium/base/files/file_proxy_unittest.cc | 4
-rw-r--r-- chromium/base/files/file_unittest.cc | 14
-rw-r--r-- chromium/base/files/file_util.h | 9
-rw-r--r-- chromium/base/files/file_util_posix.cc | 58
-rw-r--r-- chromium/base/files/file_util_unittest.cc | 92
-rw-r--r-- chromium/base/files/file_win.cc | 5
-rw-r--r-- chromium/base/files/important_file_writer.cc | 2
-rw-r--r-- chromium/base/files/important_file_writer.h | 11
-rw-r--r-- chromium/base/files/important_file_writer_unittest.cc | 14
-rw-r--r-- chromium/base/fuchsia/async_dispatcher.cc | 31
-rw-r--r-- chromium/base/fuchsia/async_dispatcher.h | 10
-rw-r--r-- chromium/base/fuchsia/async_dispatcher_unittest.cc | 17
-rw-r--r-- chromium/base/fuchsia/component_context.cc | 31
-rw-r--r-- chromium/base/fuchsia/component_context.h | 11
-rw-r--r-- chromium/base/fuchsia/default_job.cc | 14
-rw-r--r-- chromium/base/fuchsia/default_job.h | 9
-rw-r--r-- chromium/base/fuchsia/fidl_interface_request.cc | 13
-rw-r--r-- chromium/base/fuchsia/fidl_interface_request.h | 22
-rw-r--r-- chromium/base/fuchsia/file_utils.cc | 49
-rw-r--r-- chromium/base/fuchsia/file_utils.h | 25
-rw-r--r-- chromium/base/fuchsia/filtered_service_directory.cc | 63
-rw-r--r-- chromium/base/fuchsia/filtered_service_directory.h | 50
-rw-r--r-- chromium/base/fuchsia/filtered_service_directory_unittest.cc | 84
-rw-r--r-- chromium/base/fuchsia/scoped_service_binding.h | 37
-rw-r--r-- chromium/base/fuchsia/scoped_zx_handle.cc | 16
-rw-r--r-- chromium/base/fuchsia/scoped_zx_handle.h | 38
-rw-r--r-- chromium/base/fuchsia/service_directory.cc (renamed from chromium/base/fuchsia/services_directory.cc) | 47
-rw-r--r-- chromium/base/fuchsia/service_directory.h (renamed from chromium/base/fuchsia/services_directory.h) | 24
-rw-r--r-- chromium/base/fuchsia/service_directory_test_base.cc | 82
-rw-r--r-- chromium/base/fuchsia/service_directory_test_base.h | 50
-rw-r--r-- chromium/base/fuchsia/service_directory_unittest.cc | 86
-rw-r--r-- chromium/base/fuchsia/services_directory_unittest.cc | 77
-rw-r--r-- chromium/base/i18n/break_iterator_unittest.cc | 127
-rw-r--r-- chromium/base/i18n/icu_util.cc | 62
-rw-r--r-- chromium/base/i18n/number_formatting_unittest.cc | 29
-rw-r--r-- chromium/base/i18n/rtl.cc | 5
-rw-r--r-- chromium/base/i18n/rtl.h | 3
-rw-r--r-- chromium/base/i18n/rtl_unittest.cc | 19
-rw-r--r-- chromium/base/i18n/time_formatting_unittest.cc | 5
-rw-r--r-- chromium/base/ios/ios_util.h | 3
-rw-r--r-- chromium/base/ios/ios_util.mm | 5
-rw-r--r-- chromium/base/json/json_reader_fuzzer.cc | 6
-rw-r--r-- chromium/base/json/string_escape_fuzzer.cc | 4
-rw-r--r-- chromium/base/logging.h | 3
-rw-r--r-- chromium/base/logging_unittest.cc | 130
-rw-r--r-- chromium/base/mac/bind_objc_block.h | 79
-rw-r--r-- chromium/base/mac/bind_objc_block_unittest.mm | 106
-rw-r--r-- chromium/base/mac/bind_objc_block_unittest_arc.mm | 71
-rw-r--r-- chromium/base/mac/foundation_util.h | 5
-rw-r--r-- chromium/base/mac/mac_util.h | 10
-rw-r--r-- chromium/base/mac/mac_util.mm | 2
-rw-r--r-- chromium/base/mac/mac_util_unittest.mm | 110
-rw-r--r-- chromium/base/memory/discardable_shared_memory_unittest.cc | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac_unittest.cc | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_unittest.cc | 2
-rw-r--r-- chromium/base/memory/platform_shared_memory_region.h | 12
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_android.cc | 11
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_fuchsia.cc | 35
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_mac.cc | 11
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_posix.cc | 39
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_unittest.cc | 63
-rw-r--r-- chromium/base/memory/platform_shared_memory_region_win.cc | 11
-rw-r--r-- chromium/base/memory/read_only_shared_memory_region.h | 6
-rw-r--r-- chromium/base/memory/ref_counted_unittest.cc | 89
-rw-r--r-- chromium/base/memory/scoped_refptr.h | 6
-rw-r--r-- chromium/base/memory/shared_memory_fuchsia.cc | 39
-rw-r--r-- chromium/base/memory/shared_memory_handle.h | 4
-rw-r--r-- chromium/base/memory/shared_memory_posix.cc | 7
-rw-r--r-- chromium/base/memory/shared_memory_region_unittest.cc | 1
-rw-r--r-- chromium/base/memory/shared_memory_unittest.cc | 29
-rw-r--r-- chromium/base/memory/unsafe_shared_memory_region.h | 6
-rw-r--r-- chromium/base/memory/weak_ptr.cc | 22
-rw-r--r-- chromium/base/memory/weak_ptr.h | 29
-rw-r--r-- chromium/base/memory/weak_ptr_unittest.cc | 37
-rw-r--r-- chromium/base/memory/writable_shared_memory_region.cc | 9
-rw-r--r-- chromium/base/memory/writable_shared_memory_region.h | 12
-rw-r--r-- chromium/base/message_loop/incoming_task_queue.cc | 233
-rw-r--r-- chromium/base/message_loop/incoming_task_queue.h | 120
-rw-r--r-- chromium/base/message_loop/message_loop.cc | 190
-rw-r--r-- chromium/base/message_loop/message_loop.h | 34
-rw-r--r-- chromium/base/message_loop/message_loop_current.cc | 8
-rw-r--r-- chromium/base/message_loop/message_loop_current.h | 8
-rw-r--r-- chromium/base/message_loop/message_loop_task_runner_perftest.cc | 191
-rw-r--r-- chromium/base/message_loop/message_loop_unittest.cc | 84
-rw-r--r-- chromium/base/message_loop/message_pump_android.cc | 336
-rw-r--r-- chromium/base/message_loop/message_pump_android.h | 57
-rw-r--r-- chromium/base/message_loop/message_pump_fuchsia.cc | 4
-rw-r--r-- chromium/base/message_loop/message_pump_glib_unittest.cc | 97
-rw-r--r-- chromium/base/message_loop/message_pump_perftest.cc | 63
-rw-r--r-- chromium/base/message_loop/message_pump_win.cc | 16
-rw-r--r-- chromium/base/message_loop/message_pump_win.h | 7
-rw-r--r-- chromium/base/metrics/field_trial_unittest.cc | 17
-rw-r--r-- chromium/base/metrics/histogram.cc | 41
-rw-r--r-- chromium/base/metrics/histogram_base.cc | 5
-rw-r--r-- chromium/base/metrics/histogram_base_unittest.cc | 86
-rw-r--r-- chromium/base/metrics/histogram_functions_unittest.cc | 2
-rw-r--r-- chromium/base/metrics/histogram_samples.h | 7
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator.cc | 22
-rw-r--r-- chromium/base/metrics/single_sample_metrics_unittest.cc | 2
-rw-r--r-- chromium/base/native_library_fuchsia.cc | 10
-rw-r--r-- chromium/base/nix/xdg_util.cc | 2
-rw-r--r-- chromium/base/observer_list_unittest.cc | 20
-rw-r--r-- chromium/base/optional.h | 16
-rw-r--r-- chromium/base/path_service_unittest.cc | 4
-rw-r--r-- chromium/base/pending_task.h | 6
-rw-r--r-- chromium/base/posix/eintr_wrapper.h | 6
-rw-r--r-- chromium/base/power_monitor/power_monitor.cc | 6
-rw-r--r-- chromium/base/power_monitor/power_monitor.h | 5
-rw-r--r-- chromium/base/power_monitor/power_monitor_device_source.cc | 5
-rw-r--r-- chromium/base/power_monitor/power_monitor_device_source.h | 2
-rw-r--r-- chromium/base/power_monitor/power_monitor_device_source_ios.mm | 11
-rw-r--r-- chromium/base/power_monitor/power_monitor_source.h | 11
-rw-r--r-- chromium/base/process/kill.h | 4
-rw-r--r-- chromium/base/process/launch.h | 49
-rw-r--r-- chromium/base/process/launch_fuchsia.cc | 281
-rw-r--r-- chromium/base/process/launch_win.cc | 13
-rw-r--r-- chromium/base/process/memory_linux.cc | 4
-rw-r--r-- chromium/base/process/process.h | 4
-rw-r--r-- chromium/base/process/process_fuchsia.cc | 60
-rw-r--r-- chromium/base/process/process_info.h | 1
-rw-r--r-- chromium/base/process/process_info_win.cc | 5
-rw-r--r-- chromium/base/process/process_metrics.h | 4
-rw-r--r-- chromium/base/process/process_metrics_fuchsia.cc | 2
-rw-r--r-- chromium/base/process/process_posix.cc | 9
-rw-r--r-- chromium/base/process/process_unittest.cc | 2
-rw-r--r-- chromium/base/process/process_util_unittest.cc | 265
-rw-r--r-- chromium/base/profiler/native_stack_sampler.h | 35
-rw-r--r-- chromium/base/profiler/native_stack_sampler_mac.cc | 431
-rw-r--r-- chromium/base/profiler/native_stack_sampler_posix.cc | 1
-rw-r--r-- chromium/base/profiler/native_stack_sampler_win.cc | 224
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler.cc | 362
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler.h | 205
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler_unittest.cc | 698
-rw-r--r-- chromium/base/profiler/win32_stack_frame_unwinder.cc | 8
-rw-r--r-- chromium/base/rand_util.h | 2
-rw-r--r-- chromium/base/rand_util_fuchsia.cc | 20
-rw-r--r-- chromium/base/run_loop.cc | 18
-rw-r--r-- chromium/base/run_loop.h | 7
-rw-r--r-- chromium/base/sampling_heap_profiler/benchmark-octane.js | 76
-rw-r--r-- chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc | 72
-rw-r--r-- chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h | 152
-rw-r--r-- chromium/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc | 183
-rw-r--r-- chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc | 120
-rw-r--r-- chromium/base/sampling_heap_profiler/sampling_heap_profiler.h | 14
-rw-r--r-- chromium/base/synchronization/atomic_flag.cc | 4
-rw-r--r-- chromium/base/synchronization/atomic_flag.h | 2
-rw-r--r-- chromium/base/synchronization/condition_variable_unittest.cc | 14
-rw-r--r-- chromium/base/synchronization/waitable_event_mac.cc | 32
-rw-r--r-- chromium/base/sys_info.h | 9
-rw-r--r-- chromium/base/sys_info_chromeos.cc | 29
-rw-r--r-- chromium/base/sys_info_posix.cc | 2
-rw-r--r-- chromium/base/sys_info_unittest.cc | 28
-rw-r--r-- chromium/base/syslog_logging.cc | 5
-rw-r--r-- chromium/base/task/sequence_manager/enqueue_order.cc | 17
-rw-r--r-- chromium/base/task/sequence_manager/enqueue_order.h | 71
-rw-r--r-- chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.cc | 42
-rw-r--r-- chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.h | 50
-rw-r--r-- chromium/base/task/sequence_manager/intrusive_heap.h | 229
-rw-r--r-- chromium/base/task/sequence_manager/intrusive_heap_unittest.cc | 378
-rw-r--r-- chromium/base/task/sequence_manager/lazily_deallocated_deque.h | 364
-rw-r--r-- chromium/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc | 364
-rw-r--r-- chromium/base/task/sequence_manager/lazy_now.cc | 2
-rw-r--r-- chromium/base/task/sequence_manager/moveable_auto_lock.h | 41
-rw-r--r-- chromium/base/task/sequence_manager/real_time_domain.cc | 48
-rw-r--r-- chromium/base/task/sequence_manager/real_time_domain.h | 37
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager.cc | 26
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager.h | 132
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager_impl.cc | 724
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager_impl.h | 341
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc | 3285
-rw-r--r-- chromium/base/task/sequence_manager/sequence_manager_perftest.cc | 306
-rw-r--r-- chromium/base/task/sequence_manager/task_queue.cc | 289
-rw-r--r-- chromium/base/task/sequence_manager/task_queue.h | 368
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_impl.cc | 1016
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_impl.h | 471
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_selector.cc | 407
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_selector.h | 225
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_selector_logic.h | 37
-rw-r--r-- chromium/base/task/sequence_manager/task_queue_selector_unittest.cc | 885
-rw-r--r-- chromium/base/task/sequence_manager/task_time_observer.h | 32
-rw-r--r-- chromium/base/task/sequence_manager/thread_controller.h | 11
-rw-r--r-- chromium/base/task/sequence_manager/thread_controller_impl.cc | 273
-rw-r--r-- chromium/base/task/sequence_manager/thread_controller_impl.h | 130
-rw-r--r-- chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc | 205
-rw-r--r-- chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h | 109
-rw-r--r-- chromium/base/task/sequence_manager/time_domain.cc | 136
-rw-r--r-- chromium/base/task/sequence_manager/time_domain.h | 139
-rw-r--r-- chromium/base/task/sequence_manager/time_domain_unittest.cc | 324
-rw-r--r-- chromium/base/task/sequence_manager/work_queue.cc | 236
-rw-r--r-- chromium/base/task/sequence_manager/work_queue.h | 152
-rw-r--r-- chromium/base/task/sequence_manager/work_queue_sets.cc | 172
-rw-r--r-- chromium/base/task/sequence_manager/work_queue_sets.h | 102
-rw-r--r-- chromium/base/task/sequence_manager/work_queue_sets_unittest.cc | 328
-rw-r--r-- chromium/base/task/sequence_manager/work_queue_unittest.cc | 475
-rw-r--r-- chromium/base/task_runner.h | 4
-rw-r--r-- chromium/base/task_scheduler/environment_config.cc | 26
-rw-r--r-- chromium/base/task_scheduler/environment_config.h | 17
-rw-r--r-- chromium/base/task_scheduler/priority_queue_unittest.cc | 4
-rw-r--r-- chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc | 20
-rw-r--r-- chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc | 40
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker.cc | 18
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool.cc | 2
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.cc | 240
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.h | 105
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc | 861
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.cc | 4
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.h | 17
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc | 18
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_unittest.cc | 43
-rw-r--r-- chromium/base/task_scheduler/service_thread.cc | 60
-rw-r--r-- chromium/base/task_scheduler/service_thread.h | 10
-rw-r--r-- chromium/base/task_scheduler/service_thread_unittest.cc | 23
-rw-r--r-- chromium/base/task_scheduler/task_scheduler.h | 3
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.cc | 77
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.h | 10
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl_unittest.cc | 102
-rw-r--r-- chromium/base/task_scheduler/task_tracker.cc | 22
-rw-r--r-- chromium/base/task_scheduler/task_tracker.h | 8
-rw-r--r-- chromium/base/task_scheduler/task_tracker_posix_unittest.cc | 4
-rw-r--r-- chromium/base/task_scheduler/task_tracker_unittest.cc | 110
-rw-r--r-- chromium/base/task_scheduler/task_traits.h | 5
-rw-r--r-- chromium/base/task_scheduler/tracked_ref.h | 4
-rw-r--r-- chromium/base/test/BUILD.gn | 34
-rw-r--r-- chromium/base/test/fontconfig_util_linux.cc | 423
-rw-r--r-- chromium/base/test/fontconfig_util_linux.h | 18
-rw-r--r-- chromium/base/third_party/symbolize/symbolize.cc | 7
-rw-r--r-- chromium/base/threading/OWNERS | 2
-rw-r--r-- chromium/base/threading/platform_thread_unittest.cc | 8
-rw-r--r-- chromium/base/threading/simple_thread.cc | 2
-rw-r--r-- chromium/base/threading/thread_local_storage.h | 14
-rw-r--r-- chromium/base/threading/thread_perftest.cc | 5
-rw-r--r-- chromium/base/threading/thread_restrictions.cc | 2
-rw-r--r-- chromium/base/threading/thread_restrictions.h | 48
-rw-r--r-- chromium/base/time/time_unittest.cc | 11
-rw-r--r-- chromium/base/time/time_win_unittest.cc | 12
-rw-r--r-- chromium/base/timer/mock_timer.cc | 92
-rw-r--r-- chromium/base/timer/mock_timer.h | 77
-rw-r--r-- chromium/base/timer/mock_timer_unittest.cc | 8
-rw-r--r-- chromium/base/timer/timer.cc | 82
-rw-r--r-- chromium/base/timer/timer.h | 165
-rw-r--r-- chromium/base/timer/timer_unittest.cc | 18
-rw-r--r-- chromium/base/trace_event/heap_profiler_heap_dump_writer.cc | 323
-rw-r--r-- chromium/base/trace_event/heap_profiler_heap_dump_writer.h | 115
-rw-r--r-- chromium/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc | 330
-rw-r--r-- chromium/base/trace_event/heap_profiler_serialization_state.cc | 27
-rw-r--r-- chromium/base/trace_event/heap_profiler_serialization_state.h | 80
-rw-r--r-- chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc | 195
-rw-r--r-- chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h | 94
-rw-r--r-- chromium/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc | 152
-rw-r--r-- chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc | 82
-rw-r--r-- chromium/base/trace_event/heap_profiler_type_name_deduplicator.h | 45
-rw-r--r-- chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc | 83
-rw-r--r-- chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc | 3
-rw-r--r-- chromium/base/trace_event/memory_allocator_dump_unittest.cc | 12
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.cc | 344
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.h | 59
-rw-r--r-- chromium/base/trace_event/memory_dump_manager_unittest.cc | 180
-rw-r--r-- chromium/base/trace_event/memory_dump_provider.h | 31
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.cc | 4
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.h | 1
-rw-r--r-- chromium/base/trace_event/memory_infra_background_whitelist.cc | 4
-rw-r--r-- chromium/base/trace_event/memory_infra_background_whitelist_unittest.cc | 37
-rw-r--r-- chromium/base/trace_event/memory_peak_detector.cc | 288
-rw-r--r-- chromium/base/trace_event/memory_peak_detector.h | 184
-rw-r--r-- chromium/base/trace_event/memory_peak_detector_unittest.cc | 564
-rw-r--r-- chromium/base/trace_event/process_memory_dump.cc | 34
-rw-r--r-- chromium/base/trace_event/process_memory_dump.h | 24
-rw-r--r-- chromium/base/trace_event/process_memory_dump_unittest.cc | 93
-rw-r--r-- chromium/base/trace_event/trace_category_unittest.cc | 9
-rw-r--r-- chromium/base/trace_event/trace_config.cc | 63
-rw-r--r-- chromium/base/trace_event/trace_config.h | 32
-rw-r--r-- chromium/base/trace_event/trace_config_memory_test_util.h | 26
-rw-r--r-- chromium/base/trace_event/trace_config_unittest.cc | 12
-rw-r--r-- chromium/base/trace_event/trace_event_etw_export_win.cc | 31
-rw-r--r-- chromium/base/trace_event/trace_event_impl.h | 4
-rw-r--r-- chromium/base/trace_event/trace_event_memory_overhead.cc | 2
-rw-r--r-- chromium/base/trace_event/trace_event_memory_overhead.h | 1
-rw-r--r-- chromium/base/trace_event/trace_log.cc | 2
-rw-r--r-- chromium/base/win/async_operation.h | 154
-rw-r--r-- chromium/base/win/reference.h | 49
-rw-r--r-- chromium/base/win/reference_unittest.cc | 38
-rw-r--r-- chromium/base/win/vector.cc | 24
-rw-r--r-- chromium/base/win/vector.h | 366
-rw-r--r-- chromium/base/win/vector_unittest.cc | 638
321 files changed, 21643 insertions, 7308 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 943412faefd..7fb93868d6e 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -68,7 +68,7 @@ if (is_android) {
}
if (is_fuchsia) {
- import("//third_party/fuchsia-sdk/fidl_library.gni")
+ import("//build/config/fuchsia/fidl_library.gni")
}
config("base_flags") {
@@ -117,7 +117,10 @@ if (is_nacl_nonsfi) {
if (is_android) {
config("android_system_libs") {
- libs = [ "log" ] # Used by logging.cc.
+ libs = [
+ "android",
+ "log", # Used by logging.cc.
+ ]
}
}
@@ -162,6 +165,9 @@ jumbo_component("base") {
"android/android_hardware_buffer_abi.h",
"android/android_hardware_buffer_compat.cc",
"android/android_hardware_buffer_compat.h",
+ "android/android_image_reader_abi.h",
+ "android/android_image_reader_compat.cc",
+ "android/android_image_reader_compat.h",
"android/animation_frame_time_histogram.cc",
"android/apk_assets.cc",
"android/apk_assets.h",
@@ -173,6 +179,7 @@ jumbo_component("base") {
"android/build_info.h",
"android/callback_android.cc",
"android/callback_android.h",
+ "android/child_process_binding_types.h",
"android/child_process_service.cc",
"android/command_line_android.cc",
"android/content_uri_utils.cc",
@@ -428,7 +435,6 @@ jumbo_component("base") {
"mac/authorization_util.h",
"mac/authorization_util.mm",
"mac/availability.h",
- "mac/bind_objc_block.h",
"mac/bundle_locations.h",
"mac/bundle_locations.mm",
"mac/call_with_eh_frame.cc",
@@ -714,6 +720,8 @@ jumbo_component("base") {
"rand_util_win.cc",
"run_loop.cc",
"run_loop.h",
+ "sampling_heap_profiler/lock_free_address_hash_set.cc",
+ "sampling_heap_profiler/lock_free_address_hash_set.h",
"sampling_heap_profiler/sampling_heap_profiler.cc",
"sampling_heap_profiler/sampling_heap_profiler.h",
"scoped_clear_errno.h",
@@ -810,10 +818,40 @@ jumbo_component("base") {
"system_monitor/system_monitor.h",
"task/cancelable_task_tracker.cc",
"task/cancelable_task_tracker.h",
+ "task/sequence_manager/enqueue_order.cc",
+ "task/sequence_manager/enqueue_order.h",
+ "task/sequence_manager/graceful_queue_shutdown_helper.cc",
+ "task/sequence_manager/graceful_queue_shutdown_helper.h",
+ "task/sequence_manager/intrusive_heap.h",
+ "task/sequence_manager/lazily_deallocated_deque.h",
"task/sequence_manager/lazy_now.cc",
"task/sequence_manager/lazy_now.h",
+ "task/sequence_manager/real_time_domain.cc",
+ "task/sequence_manager/real_time_domain.h",
+ "task/sequence_manager/sequence_manager.cc",
+ "task/sequence_manager/sequence_manager.h",
+ "task/sequence_manager/sequence_manager_impl.cc",
+ "task/sequence_manager/sequence_manager_impl.h",
"task/sequence_manager/sequenced_task_source.h",
+ "task/sequence_manager/task_queue.cc",
+ "task/sequence_manager/task_queue.h",
+ "task/sequence_manager/task_queue_impl.cc",
+ "task/sequence_manager/task_queue_impl.h",
+ "task/sequence_manager/task_queue_selector.cc",
+ "task/sequence_manager/task_queue_selector.h",
+ "task/sequence_manager/task_queue_selector_logic.h",
+ "task/sequence_manager/task_time_observer.h",
"task/sequence_manager/thread_controller.h",
+ "task/sequence_manager/thread_controller_impl.cc",
+ "task/sequence_manager/thread_controller_impl.h",
+ "task/sequence_manager/thread_controller_with_message_pump_impl.cc",
+ "task/sequence_manager/thread_controller_with_message_pump_impl.h",
+ "task/sequence_manager/time_domain.cc",
+ "task/sequence_manager/time_domain.h",
+ "task/sequence_manager/work_queue.cc",
+ "task/sequence_manager/work_queue.h",
+ "task/sequence_manager/work_queue_sets.cc",
+ "task/sequence_manager/work_queue_sets.h",
"task_runner.cc",
"task_runner.h",
"task_runner_util.h",
@@ -935,8 +973,6 @@ jumbo_component("base") {
"timer/elapsed_timer.h",
"timer/hi_res_timer_manager.h",
"timer/hi_res_timer_manager_win.cc",
- "timer/mock_timer.cc",
- "timer/mock_timer.h",
"timer/timer.cc",
"timer/timer.h",
"trace_event/auto_open_close_event.cc",
@@ -955,14 +991,6 @@ jumbo_component("base") {
"trace_event/heap_profiler_allocation_context_tracker.h",
"trace_event/heap_profiler_event_filter.cc",
"trace_event/heap_profiler_event_filter.h",
- "trace_event/heap_profiler_heap_dump_writer.cc",
- "trace_event/heap_profiler_heap_dump_writer.h",
- "trace_event/heap_profiler_serialization_state.cc",
- "trace_event/heap_profiler_serialization_state.h",
- "trace_event/heap_profiler_stack_frame_deduplicator.cc",
- "trace_event/heap_profiler_stack_frame_deduplicator.h",
- "trace_event/heap_profiler_type_name_deduplicator.cc",
- "trace_event/heap_profiler_type_name_deduplicator.h",
"trace_event/java_heap_dump_provider_android.cc",
"trace_event/java_heap_dump_provider_android.h",
"trace_event/malloc_dump_provider.cc",
@@ -983,8 +1011,6 @@ jumbo_component("base") {
"trace_event/memory_dump_scheduler.h",
"trace_event/memory_infra_background_whitelist.cc",
"trace_event/memory_infra_background_whitelist.h",
- "trace_event/memory_peak_detector.cc",
- "trace_event/memory_peak_detector.h",
"trace_event/memory_usage_estimator.cc",
"trace_event/memory_usage_estimator.h",
"trace_event/process_memory_dump.cc",
@@ -1057,6 +1083,7 @@ jumbo_component("base") {
"win/patch_util.h",
"win/process_startup_helper.cc",
"win/process_startup_helper.h",
+ "win/reference.h",
"win/registry.cc",
"win/registry.h",
"win/resource_util.cc",
@@ -1089,6 +1116,8 @@ jumbo_component("base") {
"win/startup_information.cc",
"win/startup_information.h",
"win/typed_event_handler.h",
+ "win/vector.cc",
+ "win/vector.h",
"win/wait_chain.cc",
"win/wait_chain.h",
"win/win_util.cc",
@@ -1187,6 +1216,7 @@ jumbo_component("base") {
defines = []
data = []
data_deps = []
+ libs = []
configs += [
":base_flags",
@@ -1208,6 +1238,7 @@ jumbo_component("base") {
":build_date",
":cfi_buildflags",
":debugging_buildflags",
+ ":orderfile_buildflags",
":partition_alloc_buildflags",
":protected_memory_buildflags",
":synchronization_buildflags",
@@ -1219,7 +1250,7 @@ jumbo_component("base") {
# more robust check for this.
if (!use_sysroot && (is_android || (is_linux && !is_chromecast)) &&
host_toolchain != "//build/toolchain/cros:host") {
- libs = [ "atomic" ]
+ libs += [ "atomic" ]
}
if (use_allocator_shim) {
@@ -1321,7 +1352,7 @@ jumbo_component("base") {
# This is actually a linker script, but it can be added to the link in the
# same way as a library.
- libs = [ "android/library_loader/anchor_functions.lds" ]
+ libs += [ "android/library_loader/anchor_functions.lds" ]
}
# Chromeos.
@@ -1353,12 +1384,15 @@ jumbo_component("base") {
"fuchsia/default_job.h",
"fuchsia/fidl_interface_request.cc",
"fuchsia/fidl_interface_request.h",
+ "fuchsia/file_utils.cc",
+ "fuchsia/file_utils.h",
+ "fuchsia/filtered_service_directory.cc",
+ "fuchsia/filtered_service_directory.h",
"fuchsia/fuchsia_logging.cc",
"fuchsia/fuchsia_logging.h",
- "fuchsia/scoped_zx_handle.cc",
"fuchsia/scoped_zx_handle.h",
- "fuchsia/services_directory.cc",
- "fuchsia/services_directory.h",
+ "fuchsia/service_directory.cc",
+ "fuchsia/service_directory.h",
"memory/platform_shared_memory_region_fuchsia.cc",
"memory/protected_memory_posix.cc",
"memory/shared_memory_fuchsia.cc",
@@ -1410,15 +1444,14 @@ jumbo_component("base") {
# TODO(https://crbug.com/841171): Move these back to |deps|.
public_deps += [
"//third_party/fuchsia-sdk:async",
- "//third_party/fuchsia-sdk:launchpad",
+ "//third_party/fuchsia-sdk:fdio",
+ "//third_party/fuchsia-sdk:zx",
]
deps += [
"//third_party/fuchsia-sdk:async_default",
- "//third_party/fuchsia-sdk:fdio",
"//third_party/fuchsia-sdk:fidl",
"//third_party/fuchsia-sdk:svc",
- "//third_party/fuchsia-sdk:zx",
]
}
@@ -1577,7 +1610,7 @@ jumbo_component("base") {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
- libs = [
+ libs += [
"cfgmgr32.lib",
"powrprof.lib",
"propsys.lib",
@@ -1605,7 +1638,7 @@ jumbo_component("base") {
"time/time_mac.cc",
]
- libs = [
+ libs += [
"ApplicationServices.framework",
"AppKit.framework",
"bsm",
@@ -1810,8 +1843,6 @@ jumbo_component("base") {
buildflag_header("cfi_buildflags") {
header = "cfi_buildflags.h"
- # buildflag entries added to this header must also must be manually added to
- # tools/gn/bootstrap/bootstrap.py
flags = [
# TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
"CFI_CAST_CHECK=$is_cfi && $use_cfi_cast",
@@ -1825,8 +1856,6 @@ buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = "base/debug"
- # buildflag entries added to this header must also must be manually added to
- # tools/gn/bootstrap/bootstrap.py
flags = [
"ENABLE_LOCATION_SOURCE=$enable_location_source",
"ENABLE_PROFILING=$enable_profiling",
@@ -1836,14 +1865,24 @@ buildflag_header("debugging_buildflags") {
]
}
+buildflag_header("orderfile_buildflags") {
+ header = "orderfile_buildflags.h"
+ header_dir = "base/android/orderfile"
+ using_order_profiling = is_android && use_order_profiling
+ using_devtools_dumping = is_android && devtools_instrumentation_dumping
+
+ flags = [
+ "DEVTOOLS_INSTRUMENTATION_DUMPING=$using_devtools_dumping",
+ "ORDERFILE_INSTRUMENTATION=$using_order_profiling",
+ ]
+}
+
# Build flags for ProtectedMemory, temporary workaround for crbug.com/792777
# TODO(vtsyrklevich): Remove once support for gold on Android/CrOs is dropped
buildflag_header("protected_memory_buildflags") {
header = "protected_memory_buildflags.h"
header_dir = "base/memory"
- # buildflag entries added to this header must also must be manually added to
- # tools/gn/bootstrap/bootstrap.py
flags = [ "USE_LLD=$use_lld" ]
}
@@ -1860,8 +1899,6 @@ buildflag_header("anchor_functions_buildflags") {
header_dir = "base/android/library_loader"
_supports_code_ordering = current_cpu == "arm"
- # buildflag entries added to this header must also must be manually added to
- # tools/gn/bootstrap/bootstrap.py
flags = [
"USE_LLD=$use_lld",
"SUPPORTS_CODE_ORDERING=$_supports_code_ordering",
@@ -1975,7 +2012,9 @@ component("i18n") {
test("base_perftests") {
sources = [
"message_loop/message_loop_perftest.cc",
+ "message_loop/message_loop_task_runner_perftest.cc",
"message_loop/message_pump_perftest.cc",
+ "task/sequence_manager/sequence_manager_perftest.cc",
# "test/run_all_unittests.cc",
"json/json_perftest.cc",
@@ -2015,7 +2054,6 @@ if (!is_ios) {
]
deps = [
":base",
- "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
"//third_party/icu:icuuc",
]
@@ -2027,7 +2065,6 @@ if (!is_ios) {
]
deps = [
":base",
- "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@@ -2049,9 +2086,6 @@ if (is_win) {
"cfgmgr32.lib",
"shell32.lib",
]
- deps = [
- "//build/config:exe_and_shlib_deps",
- ]
}
loadable_module("scoped_handle_test_dll") {
@@ -2072,9 +2106,6 @@ if (is_win || is_mac) {
sources = [
"profiler/test_support_library.cc",
]
- deps = [
- "//build/config:exe_and_shlib_deps",
- ]
}
}
}
@@ -2082,6 +2113,7 @@ if (is_win || is_mac) {
bundle_data("base_unittests_bundle_data") {
testonly = true
sources = [
+ "//tools/metrics/histograms/enums.xml",
"test/data/file_util/binary_file.bin",
"test/data/file_util/binary_file_diff.bin",
"test/data/file_util/binary_file_same.bin",
@@ -2143,6 +2175,7 @@ test("base_unittests") {
"allocator/allocator_interception_mac_unittest.mm",
"allocator/malloc_zone_functions_mac_unittest.cc",
"allocator/tcmalloc_unittest.cc",
+ "android/android_image_reader_compat_unittest.cc",
"android/application_status_listener_unittest.cc",
"android/content_uri_utils_unittest.cc",
"android/jni_android_unittest.cc",
@@ -2312,6 +2345,7 @@ test("base_unittests") {
"rand_util_unittest.cc",
"run_loop_unittest.cc",
"safe_numerics_unittest.cc",
+ "sampling_heap_profiler/lock_free_address_hash_set_unittest.cc",
"scoped_clear_errno_unittest.cc",
"scoped_generic_unittest.cc",
"scoped_native_library_unittest.cc",
@@ -2349,6 +2383,13 @@ test("base_unittests") {
"sys_info_unittest.cc",
"system_monitor/system_monitor_unittest.cc",
"task/cancelable_task_tracker_unittest.cc",
+ "task/sequence_manager/intrusive_heap_unittest.cc",
+ "task/sequence_manager/lazily_deallocated_deque_unittest.cc",
+ "task/sequence_manager/sequence_manager_impl_unittest.cc",
+ "task/sequence_manager/task_queue_selector_unittest.cc",
+ "task/sequence_manager/time_domain_unittest.cc",
+ "task/sequence_manager/work_queue_sets_unittest.cc",
+ "task/sequence_manager/work_queue_unittest.cc",
"task_runner_util_unittest.cc",
"task_scheduler/delayed_task_manager_unittest.cc",
"task_scheduler/lazy_task_runner_unittest.cc",
@@ -2373,7 +2414,9 @@ test("base_unittests") {
"task_scheduler/test_utils.h",
"task_scheduler/tracked_ref_unittest.cc",
"template_util_unittest.cc",
- "test/histogram_tester_unittest.cc",
+ "test/metrics/histogram_enum_reader_unittest.cc",
+ "test/metrics/histogram_tester_unittest.cc",
+ "test/metrics/user_action_tester_unittest.cc",
"test/mock_callback_unittest.cc",
"test/scoped_feature_list_unittest.cc",
"test/scoped_mock_time_message_loop_task_runner_unittest.cc",
@@ -2382,7 +2425,6 @@ test("base_unittests") {
"test/test_pending_task_unittest.cc",
"test/test_reg_util_win_unittest.cc",
"test/trace_event_analyzer_unittest.cc",
- "test/user_action_tester_unittest.cc",
"thread_annotations_unittest.cc",
"threading/platform_thread_unittest.cc",
"threading/post_task_and_reply_impl_unittest.cc",
@@ -2410,14 +2452,11 @@ test("base_unittests") {
"trace_event/blame_context_unittest.cc",
"trace_event/event_name_filter_unittest.cc",
"trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
- "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
- "trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
- "trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
"trace_event/memory_dump_scheduler_unittest.cc",
- "trace_event/memory_peak_detector_unittest.cc",
+ "trace_event/memory_infra_background_whitelist_unittest.cc",
"trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
"trace_event/trace_category_unittest.cc",
@@ -2447,6 +2486,7 @@ test("base_unittests") {
"win/message_window_unittest.cc",
"win/object_watcher_unittest.cc",
"win/pe_image_unittest.cc",
+ "win/reference_unittest.cc",
"win/registry_unittest.cc",
"win/scoped_bstr_unittest.cc",
"win/scoped_handle_unittest.cc",
@@ -2457,6 +2497,7 @@ test("base_unittests") {
"win/shortcut_unittest.cc",
"win/startup_information_unittest.cc",
"win/typed_event_handler_unittest.cc",
+ "win/vector_unittest.cc",
"win/wait_chain_unittest.cc",
"win/win_includes_unittest.cc",
"win/win_util_unittest.cc",
@@ -2495,6 +2536,7 @@ test("base_unittests") {
data = [
"test/data/",
+ "//tools/metrics/histograms/enums.xml",
]
if (is_posix) {
@@ -2616,13 +2658,19 @@ test("base_unittests") {
sources += [
"files/dir_reader_posix_unittest.cc",
"files/file_descriptor_watcher_posix_unittest.cc",
- "fuchsia/services_directory_unittest.cc",
+ "fuchsia/async_dispatcher_unittest.cc",
+ "fuchsia/filtered_service_directory_unittest.cc",
+ "fuchsia/service_directory_test_base.cc",
+ "fuchsia/service_directory_test_base.h",
+ "fuchsia/service_directory_unittest.cc",
"message_loop/message_loop_io_posix_unittest.cc",
"posix/file_descriptor_shuffle_unittest.cc",
"task_scheduler/task_tracker_posix_unittest.cc",
]
- sources += [ "fuchsia/async_dispatcher_unittest.cc" ]
+ # TODO(crbug.com/851641): FilePatchWatcherImpl is not implemented.
+ sources -= [ "files/file_path_watcher_unittest.cc" ]
+
deps += [
":test_fidl",
"//third_party/fuchsia-sdk:async",
@@ -2749,7 +2797,6 @@ if (is_android) {
"android/java/src/org/chromium/base/PathUtils.java",
"android/java/src/org/chromium/base/PowerMonitor.java",
"android/java/src/org/chromium/base/SysUtils.java",
- "android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
"android/java/src/org/chromium/base/ThrowUncaughtException.java",
"android/java/src/org/chromium/base/TimeUtils.java",
@@ -2824,13 +2871,11 @@ if (is_android) {
"android/java/src/org/chromium/base/PathUtils.java",
"android/java/src/org/chromium/base/PowerMonitor.java",
"android/java/src/org/chromium/base/Promise.java",
- "android/java/src/org/chromium/base/ResourceExtractor.java",
"android/java/src/org/chromium/base/SecureRandomInitializer.java",
"android/java/src/org/chromium/base/StreamUtil.java",
"android/java/src/org/chromium/base/StrictModeContext.java",
"android/java/src/org/chromium/base/Supplier.java",
"android/java/src/org/chromium/base/SysUtils.java",
- "android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
"android/java/src/org/chromium/base/ThrowUncaughtException.java",
"android/java/src/org/chromium/base/TimeUtils.java",
@@ -2841,6 +2886,7 @@ if (is_android) {
"android/java/src/org/chromium/base/annotations/AccessedByNative.java",
"android/java/src/org/chromium/base/annotations/CalledByNative.java",
"android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
+ "android/java/src/org/chromium/base/annotations/DoNotInline.java",
"android/java/src/org/chromium/base/annotations/JNIAdditionalImport.java",
"android/java/src/org/chromium/base/annotations/JNINamespace.java",
"android/java/src/org/chromium/base/annotations/MainDex.java",
@@ -2848,7 +2894,6 @@ if (is_android) {
"android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
"android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
"android/java/src/org/chromium/base/annotations/UsedByReflection.java",
- "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
"android/java/src/org/chromium/base/library_loader/Linker.java",
"android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
@@ -2869,6 +2914,7 @@ if (is_android) {
"android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java",
"android/java/src/org/chromium/base/memory/MemoryPressureCallback.java",
"android/java/src/org/chromium/base/memory/MemoryPressureUma.java",
+ "//third_party/android_async_task/java/src/org/chromium/base/AsyncTask.java",
]
# New versions of BuildConfig.java and NativeLibraries.java
@@ -2897,6 +2943,10 @@ if (is_android) {
"//third_party/junit:junit",
]
java_files = [
+ # AssertsTest doesn't really belong in //base but it's preferable to
+ # stick it here than create another target for a single test.
+ "android/javatests/src/org/chromium/base/AssertsTest.java",
+ "android/javatests/src/org/chromium/base/AsyncTaskTest.java",
"android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
"android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
"android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
@@ -2922,13 +2972,12 @@ if (is_android) {
"//third_party/android_tools:android_support_annotations_java",
"//third_party/android_tools:android_support_chromium_java",
"//third_party/android_tools:android_support_compat_java",
+ "//third_party/android_tools:android_test_mock_java",
"//third_party/hamcrest:hamcrest_core_java",
"//third_party/junit",
"//third_party/ub-uiautomator:ub_uiautomator_java",
]
- deps += android_extra_test_deps
-
java_files = [
"test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
@@ -2966,7 +3015,6 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
"test/android/javatests/src/org/chromium/base/test/util/Manual.java",
- "test/android/javatests/src/org/chromium/base/test/util/ManualSkipCheck.java",
"test/android/javatests/src/org/chromium/base/test/util/Matchers.java",
"test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
@@ -3002,10 +3050,16 @@ if (is_android) {
java_files = [
"android/junit/src/org/chromium/base/metrics/test/ShadowRecordHistogram.java",
"test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java",
+ "test/android/junit/src/org/chromium/base/test/asynctask/BackgroundShadowAsyncTask.java",
+ "test/android/junit/src/org/chromium/base/test/asynctask/CustomShadowAsyncTask.java",
+ "test/android/junit/src/org/chromium/base/test/util/TestRunnerTestRule.java",
+ "//third_party/robolectric/custom_asynctask/java/src/org/chromium/base/test/asynctask/ShadowAsyncTask.java",
+ "//third_party/robolectric/custom_asynctask/java/src/org/chromium/base/test/asynctask/ShadowAsyncTaskBridge.java",
]
deps = [
":base_java",
"//testing/android/junit:junit_test_support",
+ "//third_party/android_support_test_runner:runner_java",
"//third_party/robolectric:robolectric_all_java",
]
}
@@ -3024,7 +3078,6 @@ if (is_android) {
"test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java",
"test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
- "test/android/junit/src/org/chromium/base/test/util/ManualSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java",
@@ -3046,6 +3099,7 @@ if (is_android) {
java_cpp_enum("base_android_java_enums_srcjar") {
sources = [
"android/application_status_listener.h",
+ "android/child_process_binding_types.h",
"android/library_loader/library_load_from_apk_status_codes.h",
"android/library_loader/library_loader_hooks.h",
"memory/memory_pressure_listener.h",
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index 21d1970e808..77641a8cd0d 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -17,9 +17,11 @@
# multiple consumers across the codebase, consider placing it in a new directory
# under components/ instead.
+ajwong@chromium.org
danakj@chromium.org
dcheng@chromium.org
gab@chromium.org
+kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 636a3420da1..c931d0fbd14 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -11,6 +11,9 @@ declare_args() {
# e.g. for profiling (it's more rare to profile Debug builds,
# but people sometimes need to do that).
enable_debugallocation = is_debug
+
+ # Provide a way to build tcmalloc with a low memory footprint.
+ use_tcmalloc_small_but_slow = false
}
# This "allocator" meta-target will forward to the default allocator according
@@ -36,6 +39,9 @@ config("tcmalloc_flags") {
if (use_allocator_shim) {
defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
}
+ if (use_tcmalloc_small_but_slow) {
+ defines += [ "TCMALLOC_SMALL_BUT_SLOW" ]
+ }
if (is_clang) {
cflags = [
# tcmalloc initializes some fields in the wrong order.
@@ -68,7 +74,7 @@ config("tcmalloc_flags") {
if (use_allocator == "tcmalloc") {
# tcmalloc currently won't compile on Android.
source_set("tcmalloc") {
- tcmalloc_dir = "//third_party/tcmalloc/chromium"
+ tcmalloc_dir = "//third_party/tcmalloc/gperftools-2.0/chromium"
# Don't check tcmalloc's includes. These files include various files like
# base/foo.h and they actually refer to tcmalloc's forked copy of base
diff --git a/chromium/base/allocator/allocator_extension.cc b/chromium/base/allocator/allocator_extension.cc
index 9a3d114f729..b6ddbaa872b 100644
--- a/chromium/base/allocator/allocator_extension.cc
+++ b/chromium/base/allocator/allocator_extension.cc
@@ -7,9 +7,9 @@
#include "base/logging.h"
#if defined(USE_TCMALLOC)
-#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/heap-profiler.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/malloc_extension.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/malloc_hook.h"
#endif
namespace base {
diff --git a/chromium/base/allocator/allocator_interception_mac.mm b/chromium/base/allocator/allocator_interception_mac.mm
index 50202870860..2e40e87e6d2 100644
--- a/chromium/base/allocator/allocator_interception_mac.mm
+++ b/chromium/base/allocator/allocator_interception_mac.mm
@@ -211,7 +211,7 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
// === Core Foundation CFAllocators ===
bool CanGetContextForCFAllocator() {
- return !base::mac::IsOSLaterThan10_13_DontCallThis();
+ return !base::mac::IsOSLaterThan10_14_DontCallThis();
}
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc
index 878e8a725c2..71e497981bc 100644
--- a/chromium/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc
@@ -4,8 +4,8 @@
#include "base/allocator/allocator_shim.h"
#include "base/allocator/allocator_shim_internals.h"
-#include "third_party/tcmalloc/chromium/src/config.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/config.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/tcmalloc.h"
namespace {
diff --git a/chromium/base/allocator/allocator_shim_override_cpp_symbols.h b/chromium/base/allocator/allocator_shim_override_cpp_symbols.h
index 3313687250f..b1e6ee2509d 100644
--- a/chromium/base/allocator/allocator_shim_override_cpp_symbols.h
+++ b/chromium/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -49,3 +49,11 @@ SHIM_ALWAYS_EXPORT void operator delete[](void* p,
const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p, size_t) __THROW {
+ ShimCppDelete(p);
+}
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p, size_t) __THROW {
+ ShimCppDelete(p);
+}
diff --git a/chromium/base/allocator/debugallocation_shim.cc b/chromium/base/allocator/debugallocation_shim.cc
index 479cfcad72d..7eb45044698 100644
--- a/chromium/base/allocator/debugallocation_shim.cc
+++ b/chromium/base/allocator/debugallocation_shim.cc
@@ -14,7 +14,7 @@
#endif
#if defined(TCMALLOC_FOR_DEBUGALLOCATION)
-#include "third_party/tcmalloc/chromium/src/debugallocation.cc"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/debugallocation.cc"
#else
-#include "third_party/tcmalloc/chromium/src/tcmalloc.cc"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/tcmalloc.cc"
#endif
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
index 40f494db992..78746070634 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -191,6 +191,9 @@ void RandomBitCorrelation(int random_bit) {
}
}
+// TODO(crbug.com/811881): These are flaky on Fuchsia
+#if !defined(OS_FUCHSIA)
+
// Tests are fairly slow, so give each random bit its own test.
#define TEST_RANDOM_BIT(BIT) \
TEST(AddressSpaceRandomizationTest, RandomBitCorrelations##BIT) { \
@@ -239,6 +242,8 @@ TEST_RANDOM_BIT(48)
// No platforms have more than 48 address bits.
#endif // defined(ARCH_CPU_64_BITS)
+#endif // defined(OS_FUCHSIA)
+
#undef TEST_RANDOM_BIT
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
index 22c645551ff..fdc6a5e947a 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -11,12 +11,12 @@
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
#include <setjmp.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/time.h>
-#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX)
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
@@ -134,7 +134,7 @@ TEST(PageAllocatorTest, AllocAndFreePages) {
}
// Test permission setting on POSIX, where we can set a trap handler.
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
namespace {
sigjmp_buf g_continuation;
@@ -217,7 +217,7 @@ TEST(PageAllocatorTest, ReadExecutePages) {
FreePages(buffer, kPageAllocationGranularity);
}
-#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX)
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 4bf6b26d7c3..afff0906b26 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -48,7 +48,7 @@ bool SetAddressSpaceLimit() {
#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
// 32 bits => address space is limited already.
return true;
-#elif defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+#elif defined(OS_POSIX) && !defined(OS_MACOSX)
// macOS will accept, but not enforce, |RLIMIT_AS| changes. See
// https://crbug.com/435269 and rdar://17576114.
//
@@ -189,7 +189,7 @@ class PartitionAllocTest : public testing::Test {
if (!IsLargeMemoryDevice()) {
LOG(WARNING)
<< "Skipping test on this device because of crbug.com/678782";
- return;
+ LOG(FATAL) << "DoReturnNullTest";
}
ASSERT_TRUE(SetAddressSpaceLimit());
@@ -242,6 +242,7 @@ class PartitionAllocTest : public testing::Test {
generic_allocator.root()->Free(ptrs);
EXPECT_TRUE(ClearAddressSpaceLimit());
+ LOG(FATAL) << "DoReturnNullTest";
}
SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
@@ -1282,6 +1283,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(bucket->decommitted_pages_head);
}
+// Death tests misbehave on Android, http://crbug.com/643760.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
// Unit tests that check if an allocation fails in "return null" mode,
// repeating it doesn't crash, and still returns null. The tests need to
// stress memory subsystem limits to do so, hence they try to allocate
@@ -1295,47 +1299,44 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
// they tend to get OOM-killed rather than pass.
// TODO(https://crbug.com/779645): Fuchsia currently sets OS_POSIX, but does
// not provide a working setrlimit().
-#if !defined(ARCH_CPU_64_BITS) || \
- (defined(OS_POSIX) && \
- !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
-
-// This is defined as a separate test class because RepeatedReturnNull
-// test exhausts the process memory, and breaks any test in the same
-// class that runs after it.
-class PartitionAllocReturnNullTest : public PartitionAllocTest {};
-
-// Test "return null" for larger, direct-mapped allocations first. As a
-// direct-mapped allocation's pages are unmapped and freed on release, this
-// test is performd first for these "return null" tests in order to leave
-// sufficient unreserved virtual memory around for the later one(s).
-TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNullDirect) {
+//
+// Disable these test on Windows, since they run slower, so tend to timout and
+// cause flake.
+#if !defined(OS_WIN) && \
+ (!defined(ARCH_CPU_64_BITS) || \
+ (defined(OS_POSIX) && \
+ !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID))))
+
+// The following four tests wrap a called function in an expect death statement
+// to perform their test, because they are non-hermetic. Specifically they are
+// going to attempt to exhaust the allocatable memory, which leaves the
+// allocator in a bad global state.
+// Performing them as death tests causes them to be forked into their own
+// process, so they won't pollute other tests.
+TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNullDirect) {
// A direct-mapped allocation size.
- DoReturnNullTest(32 * 1024 * 1024, false);
+ EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, false), "DoReturnNullTest");
}
-// Test "return null" with a 512 kB block size.
-TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNull) {
- // A single-slot but non-direct-mapped allocation size.
- DoReturnNullTest(512 * 1024, false);
+// Repeating above test with Realloc
+TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNullDirect) {
+ EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, true), "DoReturnNullTest");
}
-// Repeating the above tests using Realloc instead of Alloc.
-class PartitionReallocReturnNullTest : public PartitionAllocTest {};
-
-TEST_F(PartitionReallocReturnNullTest, RepeatedReturnNullDirect) {
- DoReturnNullTest(32 * 1024 * 1024, true);
+// Test "return null" with a 512 kB block size.
+TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNull) {
+ // A single-slot but non-direct-mapped allocation size.
+ EXPECT_DEATH(DoReturnNullTest(512 * 1024, false), "DoReturnNullTest");
}
-TEST_F(PartitionReallocReturnNullTest, RepeatedReturnNull) {
- DoReturnNullTest(512 * 1024, true);
+// Repeating above test with Realloc.
+TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNull) {
+ EXPECT_DEATH(DoReturnNullTest(512 * 1024, true), "DoReturnNullTest");
}
#endif // !defined(ARCH_CPU_64_BITS) || (defined(OS_POSIX) &&
// !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
-// Death tests misbehave on Android, http://crbug.com/643760.
-#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
// Make sure that malloc(-1) dies.
// In the past, we had an integer overflow that would alias malloc(-1) to
// malloc(0), which is not good.
@@ -1901,19 +1902,16 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
// for clarity of purpose and for applicability to more architectures.
#if defined(_MIPS_ARCH_LOONGSON)
{
- char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), (32 * kSystemPageSize) - kExtraAllocSize,
- type_name));
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ (32 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
- ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), (31 * kSystemPageSize) - kExtraAllocSize,
- type_name));
+ generic_allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ (31 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1927,12 +1925,12 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
}
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ generic_allocator.root()->Free(ptr1);
}
#else
{
diff --git a/chromium/base/android/jni_generator/BUILD.gn b/chromium/base/android/jni_generator/BUILD.gn
index f93b4162ed2..915da9755a4 100644
--- a/chromium/base/android/jni_generator/BUILD.gn
+++ b/chromium/base/android/jni_generator/BUILD.gn
@@ -41,7 +41,6 @@ shared_library("jni_sample_lib") {
":jni_sample_native_side",
":sample_jni_registration",
"//base",
- "//build/config:exe_and_shlib_deps",
]
}
diff --git a/chromium/base/android/jni_generator/jni_exception_list.gni b/chromium/base/android/jni_generator/jni_exception_list.gni
index e3ce93adbb7..31d027cab28 100644
--- a/chromium/base/android/jni_generator/jni_exception_list.gni
+++ b/chromium/base/android/jni_generator/jni_exception_list.gni
@@ -4,13 +4,10 @@
import("//device/vr/buildflags/buildflags.gni")
-jni_exception_files = [
- "//base/android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
- "//base/android/java/src/org/chromium/base/library_loader/Linker.java",
- "//base/android/java/src/org/chromium/base/library_loader/ModernLinker.java",
-]
+jni_exception_files =
+ [ "//base/android/java/src/org/chromium/base/library_loader/Linker.java" ]
# Exclude it from JNI registration if VR is not enabled.
if (!enable_vr) {
- jni_exception_files += [ "//chrome/android/java/src/org/chromium/chrome/browser/vr_shell/VrShellDelegate.java" ]
+ jni_exception_files += [ "//chrome/android/java/src/org/chromium/chrome/browser/vr/VrShellDelegate.java" ]
}
diff --git a/chromium/base/android/linker/BUILD.gn b/chromium/base/android/linker/BUILD.gn
index d18f87b35d4..ee6f5691577 100644
--- a/chromium/base/android/linker/BUILD.gn
+++ b/chromium/base/android/linker/BUILD.gn
@@ -8,10 +8,7 @@ assert(is_android)
shared_library("chromium_android_linker") {
sources = [
- "legacy_linker_jni.cc",
- "legacy_linker_jni.h",
"linker_jni.cc",
- "linker_jni.h",
]
# The NDK contains the crazy_linker here:
@@ -19,7 +16,6 @@ shared_library("chromium_android_linker") {
# However, we use our own fork. See bug 384700.
deps = [
"//build:buildflag_header_h",
- "//build/config:exe_and_shlib_deps",
"//third_party/android_crazy_linker",
]
diff --git a/chromium/base/android/orderfile/BUILD.gn b/chromium/base/android/orderfile/BUILD.gn
new file mode 100644
index 00000000000..ff0bfff147f
--- /dev/null
+++ b/chromium/base/android/orderfile/BUILD.gn
@@ -0,0 +1,34 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/config.gni")
+
+if (use_order_profiling && target_cpu == "arm") {
+ static_library("orderfile_instrumentation") {
+ sources = [
+ "orderfile_instrumentation.cc",
+ "orderfile_instrumentation.h",
+ ]
+ deps = [
+ "//base",
+ ]
+ }
+
+ executable("orderfile_instrumentation_perftest") {
+ testonly = true
+
+ sources = [
+ "orderfile_instrumentation_perftest.cc",
+ ]
+
+ deps = [
+ ":orderfile_instrumentation",
+ "//base",
+ "//testing/gtest",
+ "//testing/perf",
+ ]
+
+ configs -= [ "//build/config/android:default_orderfile_instrumentation" ]
+ }
+}
diff --git a/chromium/base/base_paths_fuchsia.cc b/chromium/base/base_paths_fuchsia.cc
index afe449f2945..6b0c9216f15 100644
--- a/chromium/base/base_paths_fuchsia.cc
+++ b/chromium/base/base_paths_fuchsia.cc
@@ -13,22 +13,9 @@
#include "base/process/process.h"
namespace base {
-namespace {
-
-constexpr char kPackageRoot[] = "/pkg";
-
-} // namespace
base::FilePath GetPackageRoot() {
- base::FilePath path_obj(kPackageRoot);
-
- // Fuchsia's appmgr will set argv[0] to a fully qualified executable path
- // under /pkg for packaged binaries.
- if (path_obj.IsParent(base::CommandLine::ForCurrentProcess()->GetProgram())) {
- return path_obj;
- } else {
- return base::FilePath();
- }
+ return base::FilePath("/pkg");
}
bool PathProviderFuchsia(int key, FilePath* result) {
@@ -39,9 +26,6 @@ bool PathProviderFuchsia(int key, FilePath* result) {
case FILE_EXE:
*result = CommandLine::ForCurrentProcess()->GetProgram();
return true;
- case DIR_SOURCE_ROOT:
- *result = GetPackageRoot();
- return true;
case DIR_APP_DATA:
// TODO(https://crbug.com/840598): Switch to /data when minfs supports
// mmap().
@@ -53,6 +37,7 @@ bool PathProviderFuchsia(int key, FilePath* result) {
*result = FilePath("/data");
return true;
case DIR_ASSETS:
+ case DIR_SOURCE_ROOT:
*result = GetPackageRoot();
return true;
}
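With this change both DIR_ASSETS and DIR_SOURCE_ROOT resolve to the package root ("/pkg") on Fuchsia. A minimal caller sketch using the usual PathService API; the function name and the appended asset file name are purely illustrative:

#include "base/base_paths.h"
#include "base/files/file_path.h"
#include "base/path_service.h"

base::FilePath GetPackagedAsset() {
  base::FilePath assets_dir;
  // Resolves to "/pkg" when running as a packaged Fuchsia component.
  if (!base::PathService::Get(base::DIR_ASSETS, &assets_dir))
    return base::FilePath();
  return assets_dir.AppendASCII("icudtl.dat");  // Example asset name only.
}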
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 7ce7380dbe0..13c710b19e7 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -116,12 +116,6 @@ const char kEnableCrashReporterForTesting[] =
// given in base/android/library_loader/anchor_functions.h, via madvise and
// changing the library prefetch behavior.
const char kOrderfileMemoryOptimization[] = "orderfile-memory-optimization";
-// Force prefetching of the native library even if otherwise disabled, eg by
-// --orderfile-memory-optimization.
-const char kForceNativePrefetch[] = "force-native-prefetch";
-// If prefetching is enabled, only prefetch the ordered part of the native
-// library. Has no effect if prefetching is disabled.
-const char kNativePrefetchOrderedOnly[] = "native-prefetch-ordered-only";
#endif
} // namespace switches
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index 3425e6fad09..4ef070d3f54 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -44,8 +44,6 @@ extern const char kEnableCrashReporterForTesting[];
#if defined(OS_ANDROID)
extern const char kOrderfileMemoryOptimization[];
-extern const char kForceNativePrefetch[];
-extern const char kNativePrefetchOrderedOnly[];
#endif
} // namespace switches
diff --git a/chromium/base/bind.h b/chromium/base/bind.h
index aab68289662..66d5d82dd09 100644
--- a/chromium/base/bind.h
+++ b/chromium/base/bind.h
@@ -8,6 +8,12 @@
#include <utility>
#include "base/bind_internal.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+#include "base/mac/scoped_block.h"
+#endif
// -----------------------------------------------------------------------------
// Usage documentation
@@ -452,6 +458,25 @@ static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
return internal::IgnoreResultHelper<T>(std::move(data));
}
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+
+// RetainBlock() is used to adapt an Objective-C block when Automatic Reference
+// Counting (ARC) is disabled. This is unnecessary when ARC is enabled, as
+// BindOnce and BindRepeating already support blocks in that case.
+//
+// EXAMPLE OF RetainBlock():
+//
+// // Wrap the block and bind it to a callback.
+// Callback<void(int)> cb = Bind(RetainBlock(^(int n) { NSLog(@"%d", n); }));
+// cb.Run(1); // Logs "1".
+template <typename R, typename... Args>
+base::mac::ScopedBlock<R (^)(Args...)> RetainBlock(R (^block)(Args...)) {
+ return base::mac::ScopedBlock<R (^)(Args...)>(block,
+ base::scoped_policy::RETAIN);
+}
+
+#endif // defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+
} // namespace base
#endif // BASE_BIND_H_
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index d748f89f834..4ebecfaf771 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -11,11 +11,16 @@
#include <utility>
#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
#include "base/memory/weak_ptr.h"
#include "base/template_util.h"
#include "build/build_config.h"
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+#include "base/mac/scoped_block.h"
+#endif
+
// See base/callback.h for user documentation.
//
//
@@ -433,6 +438,61 @@ struct FunctorTraits<R(__fastcall*)(Args...)> {
#endif // defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+#if defined(OS_MACOSX)
+
+// Support for Objective-C blocks. There are two implementations depending
+// on whether Automatic Reference Counting (ARC) is enabled. When ARC is
+// enabled, the block itself can be bound, as the compiler will ensure its
+// lifetime is correctly managed. Otherwise, the block must be wrapped in a
+// base::mac::ScopedBlock (via base::RetainBlock) that will correctly manage
+// the block's lifetime.
+//
+// The two implementations ensure that the One Definition Rule (ODR) is not
+// broken (it is not possible to write a template base::RetainBlock that would
+// work correctly with both ARC enabled and disabled).
+
+#if HAS_FEATURE(objc_arc)
+
+template <typename R, typename... Args>
+struct FunctorTraits<R (^)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename BlockType, typename... RunArgs>
+ static R Invoke(BlockType&& block, RunArgs&&... args) {
+ // According to LLVM documentation (§ 6.3), "local variables of automatic
+ // storage duration do not have precise lifetime." Use objc_precise_lifetime
+ // to ensure that the Objective-C block is not deallocated until it has
+ // finished executing even if the Callback<> is destroyed during the block
+ // execution.
+ // https://clang.llvm.org/docs/AutomaticReferenceCounting.html#precise-lifetime-semantics
+ __attribute__((objc_precise_lifetime)) R (^scoped_block)(Args...) = block;
+ return scoped_block(std::forward<RunArgs>(args)...);
+ }
+};
+
+#else // HAS_FEATURE(objc_arc)
+
+template <typename R, typename... Args>
+struct FunctorTraits<base::mac::ScopedBlock<R (^)(Args...)>> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename BlockType, typename... RunArgs>
+ static R Invoke(BlockType&& block, RunArgs&&... args) {
+ // Copy the block to ensure that the Objective-C block is not deallocated
+ // until it has finished executing even if the Callback<> is destroyed
+ // during the block execution.
+ base::mac::ScopedBlock<R (^)(Args...)> scoped_block(block);
+ return scoped_block.get()(std::forward<RunArgs>(args)...);
+ }
+};
+
+#endif // HAS_FEATURE(objc_arc)
+#endif // defined(OS_MACOSX)
+
// For methods.
template <typename R, typename Receiver, typename... Args>
struct FunctorTraits<R (Receiver::*)(Args...)> {
@@ -579,7 +639,7 @@ struct Invoker;
template <typename StorageType, typename R, typename... UnboundArgs>
struct Invoker<StorageType, R(UnboundArgs...)> {
static R RunOnce(BindStateBase* base,
- PassingTraitsType<UnboundArgs>... unbound_args) {
+ PassingType<UnboundArgs>... unbound_args) {
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
@@ -592,8 +652,7 @@ struct Invoker<StorageType, R(UnboundArgs...)> {
std::forward<UnboundArgs>(unbound_args)...);
}
- static R Run(BindStateBase* base,
- PassingTraitsType<UnboundArgs>... unbound_args) {
+ static R Run(BindStateBase* base, PassingType<UnboundArgs>... unbound_args) {
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index 00675be054d..bcda5af587e 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -8,6 +8,8 @@
#ifndef BASE_CALLBACK_H_
#define BASE_CALLBACK_H_
+#include <stddef.h>
+
#include "base/callback_forward.h"
#include "base/callback_internal.h"
@@ -55,9 +57,10 @@ class OnceCallback<R(Args...)> : public internal::CallbackBase {
public:
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
- internal::PassingTraitsType<Args>...);
+ internal::PassingType<Args>...);
constexpr OnceCallback() = default;
+ OnceCallback(std::nullptr_t) = delete;
explicit OnceCallback(internal::BindStateBase* bind_state)
: internal::CallbackBase(bind_state) {}
@@ -102,9 +105,10 @@ class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
public:
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
- internal::PassingTraitsType<Args>...);
+ internal::PassingType<Args>...);
constexpr RepeatingCallback() = default;
+ RepeatingCallback(std::nullptr_t) = delete;
explicit RepeatingCallback(internal::BindStateBase* bind_state)
: internal::CallbackBaseCopyable(bind_state) {}
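Deleting the std::nullptr_t constructors means a callback can no longer be created from a literal nullptr; default construction already yields a null callback. An illustrative sketch (function name hypothetical):

#include "base/bind.h"
#include "base/callback.h"
#include "base/logging.h"

void NullCallbackSketch() {
  base::OnceClosure cb;               // Default-constructed callbacks are null.
  DCHECK(cb.is_null());
  // base::OnceClosure bad = nullptr;  // No longer compiles: ctor is deleted.
  cb = base::BindOnce([] {});          // Bind something real instead.
  std::move(cb).Run();
}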
diff --git a/chromium/base/callback_internal.cc b/chromium/base/callback_internal.cc
index 6cef8417835..dd000ca8e29 100644
--- a/chromium/base/callback_internal.cc
+++ b/chromium/base/callback_internal.cc
@@ -33,7 +33,6 @@ BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
destructor_(destructor),
is_cancelled_(is_cancelled) {}
-CallbackBase::CallbackBase(CallbackBase&& c) noexcept = default;
CallbackBase& CallbackBase::operator=(CallbackBase&& c) noexcept = default;
CallbackBase::CallbackBase(const CallbackBaseCopyable& c)
: bind_state_(c.bind_state_) {}
@@ -66,21 +65,12 @@ bool CallbackBase::EqualsInternal(const CallbackBase& other) const {
return bind_state_ == other.bind_state_;
}
-CallbackBase::CallbackBase(BindStateBase* bind_state)
- : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
- DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
-}
-
CallbackBase::~CallbackBase() = default;
-CallbackBaseCopyable::CallbackBaseCopyable(const CallbackBaseCopyable& c)
- : CallbackBase(nullptr) {
+CallbackBaseCopyable::CallbackBaseCopyable(const CallbackBaseCopyable& c) {
bind_state_ = c.bind_state_;
}
-CallbackBaseCopyable::CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept =
- default;
-
CallbackBaseCopyable& CallbackBaseCopyable::operator=(
const CallbackBaseCopyable& c) {
bind_state_ = c.bind_state_;
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index bfa5a6a6f4b..1215e3e8705 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -31,21 +31,8 @@ struct BindStateBaseRefCountTraits {
static void Destruct(const BindStateBase*);
};
-template <typename T, bool IsScalar = std::is_scalar<T>::value>
-struct PassingTraits;
-
-template <typename T>
-struct PassingTraits<T, false> {
- using Type = T&&;
-};
-
-template <typename T>
-struct PassingTraits<T, true> {
- using Type = T;
-};
-
template <typename T>
-using PassingTraitsType = typename PassingTraits<T>::Type;
+using PassingType = std::conditional_t<std::is_scalar<T>::value, T, T&&>;
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
@@ -108,7 +95,7 @@ class BASE_EXPORT BindStateBase
// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
class BASE_EXPORT CallbackBase {
public:
- CallbackBase(CallbackBase&& c) noexcept;
+ inline CallbackBase(CallbackBase&& c) noexcept;
CallbackBase& operator=(CallbackBase&& c) noexcept;
explicit CallbackBase(const CallbackBaseCopyable& c);
@@ -138,7 +125,7 @@ class BASE_EXPORT CallbackBase {
// Allow initializing of |bind_state_| via the constructor to avoid default
// initialization of the scoped_refptr.
- explicit CallbackBase(BindStateBase* bind_state);
+ explicit inline CallbackBase(BindStateBase* bind_state);
InvokeFuncStorage polymorphic_invoke() const {
return bind_state_->polymorphic_invoke_;
@@ -153,12 +140,15 @@ class BASE_EXPORT CallbackBase {
};
constexpr CallbackBase::CallbackBase() = default;
+CallbackBase::CallbackBase(CallbackBase&&) noexcept = default;
+CallbackBase::CallbackBase(BindStateBase* bind_state)
+ : bind_state_(AdoptRef(bind_state)) {}
// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
public:
CallbackBaseCopyable(const CallbackBaseCopyable& c);
- CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept;
+ CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept = default;
CallbackBaseCopyable& operator=(const CallbackBaseCopyable& c);
CallbackBaseCopyable& operator=(CallbackBaseCopyable&& c) noexcept;
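The PassingType alias folds the old PassingTraits specializations into one expression: scalar types are passed by value, everything else by rvalue reference. A small compile-time sketch of the intended behaviour, relying only on the alias defined above:

#include <string>
#include <type_traits>

#include "base/callback_internal.h"

static_assert(std::is_same<base::internal::PassingType<int>, int>::value,
              "scalars pass by value");
static_assert(std::is_same<base::internal::PassingType<void*>, void*>::value,
              "pointers are scalars, so they also pass by value");
static_assert(std::is_same<base::internal::PassingType<std::string>,
                           std::string&&>::value,
              "non-scalars pass by rvalue reference");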
diff --git a/chromium/base/containers/queue.h b/chromium/base/containers/queue.h
index 2d3b480089f..b5bc5c36e53 100644
--- a/chromium/base/containers/queue.h
+++ b/chromium/base/containers/queue.h
@@ -12,7 +12,7 @@
namespace base {
// Provides a definition of base::queue that's like std::queue but uses a
-// base::circular_queue instead of std::deque. Since std::queue is just a
+// base::circular_deque instead of std::deque. Since std::queue is just a
// wrapper for an underlying type, we can just provide a typedef for it that
// defaults to the base circular_deque.
template <class T, class Container = circular_deque<T>>
diff --git a/chromium/base/containers/ring_buffer.h b/chromium/base/containers/ring_buffer.h
index 4e48907c6a0..ca4a48ddc9e 100644
--- a/chromium/base/containers/ring_buffer.h
+++ b/chromium/base/containers/ring_buffer.h
@@ -30,19 +30,25 @@ class RingBuffer {
size_t CurrentIndex() const { return current_index_; }
- // tests if a value was saved to this index
- bool IsFilledIndex(size_t n) const { return BufferIndex(n) < current_index_; }
+ // Returns true if a value was saved to index |n|.
+ bool IsFilledIndex(size_t n) const {
+ return IsFilledIndexByBufferIndex(BufferIndex(n));
+ }
+ // Returns the element at index |n| (% |kSize|).
+ //
// n = 0 returns the oldest value and
// n = bufferSize() - 1 returns the most recent value.
const T& ReadBuffer(size_t n) const {
- DCHECK(IsFilledIndex(n));
- return buffer_[BufferIndex(n)];
+ const size_t buffer_index = BufferIndex(n);
+ CHECK(IsFilledIndexByBufferIndex(buffer_index));
+ return buffer_[buffer_index];
}
T* MutableReadBuffer(size_t n) {
- DCHECK(IsFilledIndex(n));
- return &buffer_[BufferIndex(n)];
+ const size_t buffer_index = BufferIndex(n);
+ CHECK(IsFilledIndexByBufferIndex(buffer_index));
+ return &buffer_[buffer_index];
}
void SaveToBuffer(const T& value) {
@@ -75,7 +81,7 @@ class RingBuffer {
}
operator bool() const {
- return buffer_.IsFilledIndex(index_) && !out_of_range_;
+ return !out_of_range_ && buffer_.IsFilledIndex(index_);
}
private:
@@ -108,6 +114,14 @@ class RingBuffer {
return (current_index_ + n) % kSize;
}
+ // This specialization of |IsFilledIndex| is a micro-optimization that enables
+ // us to do e.g. `CHECK(IsFilledIndex(n))` without calling |BufferIndex|
+ // twice. Since |BufferIndex| involves a % operation, it's not quite free at a
+ // micro-scale.
+ inline bool IsFilledIndexByBufferIndex(size_t buffer_index) const {
+ return buffer_index < current_index_;
+ }
+
T buffer_[kSize];
size_t current_index_;
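Because ReadBuffer() and MutableReadBuffer() now CHECK rather than DCHECK, reading an unfilled slot crashes in release builds as well. A minimal usage sketch that guards reads with IsFilledIndex() (function name hypothetical):

#include "base/containers/ring_buffer.h"
#include "base/logging.h"

void DumpSamples() {
  base::RingBuffer<int, 4> samples;
  samples.SaveToBuffer(42);
  samples.SaveToBuffer(7);
  // Only read slots that were actually written; ReadBuffer() CHECKs otherwise.
  for (size_t n = 0; n < samples.BufferSize(); ++n) {
    if (samples.IsFilledIndex(n))
      LOG(INFO) << "sample " << n << " = " << samples.ReadBuffer(n);
  }
}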
diff --git a/chromium/base/containers/stack.h b/chromium/base/containers/stack.h
index 1aaa8793c7c..5cf06f82513 100644
--- a/chromium/base/containers/stack.h
+++ b/chromium/base/containers/stack.h
@@ -12,7 +12,7 @@
namespace base {
// Provides a definition of base::stack that's like std::stack but uses a
-// base::circular_queue instead of std::deque. Since std::stack is just a
+// base::circular_deque instead of std::deque. Since std::stack is just a
// wrapper for an underlying type, we can just provide a typedef for it that
// defaults to the base circular_deque.
template <class T, class Container = circular_deque<T>>
diff --git a/chromium/base/debug/activity_tracker.h b/chromium/base/debug/activity_tracker.h
index bfd9f9d45c5..5647d864fcd 100644
--- a/chromium/base/debug/activity_tracker.h
+++ b/chromium/base/debug/activity_tracker.h
@@ -860,6 +860,13 @@ class BASE_EXPORT GlobalActivityTracker {
GlobalActivityTracker* global_tracker = Get();
if (!global_tracker)
return nullptr;
+
+ // It is not safe to use TLS once TLS has been destroyed. This can happen
+ // if code that runs late during thread destruction tries to use a
+ // base::Lock. See https://crbug.com/864589.
+ if (base::ThreadLocalStorage::HasBeenDestroyed())
+ return nullptr;
+
if (lock_allowed)
return global_tracker->GetOrCreateTrackerForCurrentThread();
else
diff --git a/chromium/base/debug/profiler.cc b/chromium/base/debug/profiler.cc
index 1ee948334e1..ef9afb6daa4 100644
--- a/chromium/base/debug/profiler.cc
+++ b/chromium/base/debug/profiler.cc
@@ -19,7 +19,7 @@
// TODO(peria): Enable profiling on Windows.
#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
-#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/profiler.h"
#endif
namespace base {
diff --git a/chromium/base/debug/stack_trace_unittest.cc b/chromium/base/debug/stack_trace_unittest.cc
index 959cd533cbc..02f076a2ae8 100644
--- a/chromium/base/debug/stack_trace_unittest.cc
+++ b/chromium/base/debug/stack_trace_unittest.cc
@@ -153,7 +153,7 @@ TEST_F(StackTraceTest, DebugPrintBacktrace) {
}
#endif // !defined(__UCLIBC__)
-#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
#if !defined(OS_IOS)
static char* newArray() {
// Clang warns about the mismatched new[]/delete if they occur in the same
@@ -253,7 +253,7 @@ TEST_F(StackTraceTest, itoa_r) {
EXPECT_EQ("0688", itoa_r_wrapper(0x688, 128, 16, 4));
EXPECT_EQ("00688", itoa_r_wrapper(0x688, 128, 16, 5));
}
-#endif // defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
diff --git a/chromium/base/debug/task_annotator.cc b/chromium/base/debug/task_annotator.cc
index 2197b859159..18083c120d6 100644
--- a/chromium/base/debug/task_annotator.cc
+++ b/chromium/base/debug/task_annotator.cc
@@ -35,27 +35,27 @@ TaskAnnotator::TaskAnnotator() = default;
TaskAnnotator::~TaskAnnotator() = default;
-void TaskAnnotator::DidQueueTask(const char* queue_function,
- const PendingTask& pending_task) {
+void TaskAnnotator::WillQueueTask(const char* queue_function,
+ PendingTask* pending_task) {
if (queue_function) {
TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
queue_function,
- TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+ TRACE_ID_MANGLE(GetTaskTraceID(*pending_task)),
TRACE_EVENT_FLAG_FLOW_OUT);
}
- // TODO(https://crbug.com/826902): Fix callers that invoke DidQueueTask()
+ // TODO(https://crbug.com/826902): Fix callers that invoke WillQueueTask()
// twice for the same PendingTask.
// DCHECK(!pending_task.task_backtrace[0])
// << "Task backtrace was already set, task posted twice??";
- if (!pending_task.task_backtrace[0]) {
+ if (!pending_task->task_backtrace[0]) {
const PendingTask* parent_task = GetTLSForCurrentPendingTask()->Get();
if (parent_task) {
- pending_task.task_backtrace[0] =
+ pending_task->task_backtrace[0] =
parent_task->posted_from.program_counter();
std::copy(parent_task->task_backtrace.begin(),
parent_task->task_backtrace.end() - 1,
- pending_task.task_backtrace.begin() + 1);
+ pending_task->task_backtrace.begin() + 1);
}
}
}
diff --git a/chromium/base/debug/task_annotator.h b/chromium/base/debug/task_annotator.h
index f53d02c2c39..fedca7d599c 100644
--- a/chromium/base/debug/task_annotator.h
+++ b/chromium/base/debug/task_annotator.h
@@ -28,12 +28,12 @@ class BASE_EXPORT TaskAnnotator {
TaskAnnotator();
~TaskAnnotator();
- // Called to indicate that a task has been queued to run in the future.
- // |queue_function| is used as the trace flow event name. |queue_function| can
- // be null if the caller doesn't want trace flow events logged to
- // toplevel.flow.
- void DidQueueTask(const char* queue_function,
- const PendingTask& pending_task);
+ // Called to indicate that a task is about to be queued to run in the future,
+ // giving one last chance for this TaskAnnotator to add metadata to
+ // |pending_task| before it is moved into the queue. |queue_function| is used
+ // as the trace flow event name. |queue_function| can be null if the caller
+ // doesn't want trace flow events logged to toplevel.flow.
+ void WillQueueTask(const char* queue_function, PendingTask* pending_task);
// Run a previously queued task. |queue_function| should match what was
// passed into |DidQueueTask| for this task.
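The rename to WillQueueTask() also changes the contract: the annotator now receives a mutable PendingTask before it is enqueued, so it can still write the posting backtrace into it. A sketch of the expected call-site shape; the queue and function names here are hypothetical:

#include <utility>

#include "base/callback.h"
#include "base/containers/queue.h"
#include "base/debug/task_annotator.h"
#include "base/location.h"
#include "base/pending_task.h"

void AnnotateAndQueue(base::debug::TaskAnnotator* annotator,
                      base::queue<base::PendingTask>* queue,
                      base::OnceClosure task) {
  base::PendingTask pending(FROM_HERE, std::move(task));
  // Annotate before the task is moved into the queue, while it is writable.
  annotator->WillQueueTask("AnnotateAndQueue", &pending);
  queue->push(std::move(pending));
}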
diff --git a/chromium/base/debug/task_annotator_unittest.cc b/chromium/base/debug/task_annotator_unittest.cc
index 51a5d3295c8..2f07bbd15fc 100644
--- a/chromium/base/debug/task_annotator_unittest.cc
+++ b/chromium/base/debug/task_annotator_unittest.cc
@@ -38,7 +38,7 @@ TEST(TaskAnnotatorTest, QueueAndRunTask) {
PendingTask pending_task(FROM_HERE, BindOnce(&TestTask, &result));
TaskAnnotator annotator;
- annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
+ annotator.WillQueueTask("TaskAnnotatorTest::Queue", &pending_task);
EXPECT_EQ(0, result);
annotator.RunTask("TaskAnnotatorTest::Queue", &pending_task);
EXPECT_EQ(123, result);
diff --git a/chromium/base/environment_unittest.cc b/chromium/base/environment_unittest.cc
index 23aec511812..7cb8c9c4fbd 100644
--- a/chromium/base/environment_unittest.cc
+++ b/chromium/base/environment_unittest.cc
@@ -16,7 +16,13 @@ namespace base {
namespace {
+// PATH env variable is not set on Fuchsia by default, while PWD is not set on
+// Windows.
+#if defined(OS_FUCHSIA)
+constexpr char kValidEnvironmentVariable[] = "PWD";
+#else
constexpr char kValidEnvironmentVariable[] = "PATH";
+#endif
} // namespace
diff --git a/chromium/base/files/file.cc b/chromium/base/files/file.cc
index 1a4ee370311..e8934b1cdc3 100644
--- a/chromium/base/files/file.cc
+++ b/chromium/base/files/file.cc
@@ -36,11 +36,13 @@ File::File(const FilePath& path, uint32_t flags)
}
#endif
-File::File(PlatformFile platform_file)
+File::File(PlatformFile platform_file) : File(platform_file, false) {}
+
+File::File(PlatformFile platform_file, bool async)
: file_(platform_file),
error_details_(FILE_OK),
created_(false),
- async_(false) {
+ async_(async) {
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
DCHECK_GE(platform_file, -1);
#endif
@@ -64,15 +66,6 @@ File::~File() {
Close();
}
-// static
-File File::CreateForAsyncHandle(PlatformFile platform_file) {
- File file(platform_file);
- // It would be nice if we could validate that |platform_file| was opened with
- // FILE_FLAG_OVERLAPPED on Windows but this doesn't appear to be possible.
- file.async_ = true;
- return file;
-}
-
File& File::operator=(File&& other) {
Close();
SetPlatformFile(other.TakePlatformFile());
diff --git a/chromium/base/files/file.h b/chromium/base/files/file.h
index c3a31d84f11..30f40532147 100644
--- a/chromium/base/files/file.h
+++ b/chromium/base/files/file.h
@@ -25,9 +25,9 @@
namespace base {
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
- defined(OS_ANDROID) && __ANDROID_API__ < 21
+ defined(OS_FUCHSIA) || (defined(OS_ANDROID) && __ANDROID_API__ < 21)
typedef struct stat stat_wrapper_t;
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
typedef struct stat64 stat_wrapper_t;
#endif
@@ -153,9 +153,14 @@ class BASE_EXPORT File {
// |path| contains path traversal ('..') components.
File(const FilePath& path, uint32_t flags);
- // Takes ownership of |platform_file|.
+ // Takes ownership of |platform_file| and sets async to false.
explicit File(PlatformFile platform_file);
+ // Takes ownership of |platform_file| and sets async to the given value.
+ // This constructor exists because on Windows you can't check if platform_file
+ // is async or not.
+ File(PlatformFile platform_file, bool async);
+
// Creates an object with a specific error_details code.
explicit File(Error error_details);
@@ -163,9 +168,6 @@ class BASE_EXPORT File {
~File();
- // Takes ownership of |platform_file|.
- static File CreateForAsyncHandle(PlatformFile platform_file);
-
File& operator=(File&& other);
// Creates or opens the given file.
@@ -300,19 +302,21 @@ class BASE_EXPORT File {
bool async() const { return async_; }
#if defined(OS_WIN)
- // Sets or clears the DeleteFile disposition on the handle. Returns true if
+ // Sets or clears the DeleteFile disposition on the file. Returns true if
// the disposition was set or cleared, as indicated by |delete_on_close|.
//
- // Microsoft Windows deletes a file only when the last handle to the
- // underlying kernel object is closed when the DeleteFile disposition has been
- // set by any handle holder. This disposition is be set by:
+ // Microsoft Windows deletes a file only when the DeleteFile disposition is
+ // set on a file when the last handle to the last underlying kernel File
+  // object is closed. This disposition can be set by:
// - Calling the Win32 DeleteFile function with the path to a file.
- // - Opening/creating a file with FLAG_DELETE_ON_CLOSE.
+ // - Opening/creating a file with FLAG_DELETE_ON_CLOSE and then closing all
+ // handles to that File object.
// - Opening/creating a file with FLAG_CAN_DELETE_ON_CLOSE and subsequently
// calling DeleteOnClose(true).
//
// In all cases, all pre-existing handles to the file must have been opened
- // with FLAG_SHARE_DELETE.
+ // with FLAG_SHARE_DELETE. Once the disposition has been set by any of the
+ // above means, no new File objects can be created for the file.
//
// So:
// - Use FLAG_SHARE_DELETE when creating/opening a file to allow another
@@ -321,6 +325,9 @@ class BASE_EXPORT File {
// using this permission doesn't provide any protections.)
// - Use FLAG_DELETE_ON_CLOSE for any file that is to be deleted after use.
// The OS will ensure it is deleted even in the face of process termination.
+ // Note that it's possible for deletion to be cancelled via another File
+ // object referencing the same file using DeleteOnClose(false) to clear the
+ // DeleteFile disposition after the original File is closed.
// - Use FLAG_CAN_DELETE_ON_CLOSE in conjunction with DeleteOnClose() to alter
// the DeleteFile disposition on an open handle. This fine-grained control
// allows for marking a file for deletion during processing so that it is
@@ -372,4 +379,3 @@ class BASE_EXPORT File {
} // namespace base
#endif // BASE_FILES_FILE_H_
-
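With CreateForAsyncHandle() removed, the async bit is passed straight to the new two-argument constructor. A minimal sketch, assuming |handle| was opened elsewhere with the platform's async/overlapped flag (function names hypothetical):

#include "base/files/file.h"

base::File AdoptAsyncHandle(base::PlatformFile handle) {
  // Equivalent to the old File::CreateForAsyncHandle(handle).
  return base::File(handle, /*async=*/true);
}

base::File AdoptSyncHandle(base::PlatformFile handle) {
  // Same as the single-argument constructor, which sets async to false.
  return base::File(handle, /*async=*/false);
}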
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index 2cc2e5846e2..2530b271af3 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -65,7 +65,8 @@ class NotificationCollector
delegates_.insert(delegate);
}
- void Reset() {
+ void Reset(base::OnceClosure signal_closure) {
+ signal_closure_ = std::move(signal_closure);
signaled_.clear();
}
@@ -84,9 +85,8 @@ class NotificationCollector
signaled_.insert(delegate);
// Check whether all delegates have been signaled.
- if (signaled_ == delegates_)
- task_runner_->PostTask(FROM_HERE,
- RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+ if (signal_closure_ && signaled_ == delegates_)
+ std::move(signal_closure_).Run();
}
// Set of registered delegates.
@@ -97,6 +97,9 @@ class NotificationCollector
// The loop we should break after all delegates signaled.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ // Closure to run when all delegates have signaled.
+ base::OnceClosure signal_closure_;
};
class TestDelegateBase : public SupportsWeakPtr<TestDelegateBase> {
@@ -185,13 +188,16 @@ class FilePathWatcherTest : public testing::Test {
bool recursive_watch) WARN_UNUSED_RESULT;
bool WaitForEvents() WARN_UNUSED_RESULT {
- collector_->Reset();
+ return WaitForEventsWithTimeout(TestTimeouts::action_timeout());
+ }
+ bool WaitForEventsWithTimeout(TimeDelta timeout) WARN_UNUSED_RESULT {
RunLoop run_loop;
+ collector_->Reset(run_loop.QuitClosure());
+
// Make sure we timeout if we don't get notified.
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, run_loop.QuitWhenIdleClosure(),
- TestTimeouts::action_timeout());
+ FROM_HERE, run_loop.QuitClosure(), timeout);
run_loop.Run();
return collector_->Success();
}
@@ -272,40 +278,37 @@ TEST_F(FilePathWatcherTest, DeletedFile) {
// Deletes the FilePathWatcher when it's notified.
class Deleter : public TestDelegateBase {
public:
- Deleter(FilePathWatcher* watcher, MessageLoop* loop)
- : watcher_(watcher),
- loop_(loop) {
- }
+ explicit Deleter(base::OnceClosure done_closure)
+ : watcher_(std::make_unique<FilePathWatcher>()),
+ done_closure_(std::move(done_closure)) {}
~Deleter() override = default;
void OnFileChanged(const FilePath&, bool) override {
watcher_.reset();
- loop_->task_runner()->PostTask(
- FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+ std::move(done_closure_).Run();
}
FilePathWatcher* watcher() const { return watcher_.get(); }
private:
std::unique_ptr<FilePathWatcher> watcher_;
- MessageLoop* loop_;
+ base::OnceClosure done_closure_;
DISALLOW_COPY_AND_ASSIGN(Deleter);
};
// Verify that deleting a watcher during the callback doesn't crash.
TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
- FilePathWatcher* watcher = new FilePathWatcher;
- // Takes ownership of watcher.
- std::unique_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
- ASSERT_TRUE(SetupWatch(test_file(), watcher, deleter.get(), false));
+ base::RunLoop run_loop;
+ Deleter deleter(run_loop.QuitClosure());
+ ASSERT_TRUE(SetupWatch(test_file(), deleter.watcher(), &deleter, false));
ASSERT_TRUE(WriteFile(test_file(), "content"));
- ASSERT_TRUE(WaitForEvents());
+ run_loop.Run();
// We win if we haven't crashed yet.
// Might as well double-check it got deleted, too.
- ASSERT_TRUE(deleter->watcher() == nullptr);
+ ASSERT_TRUE(deleter.watcher() == nullptr);
}
// Verify that deleting the watcher works even if there is a pending
@@ -540,7 +543,7 @@ TEST_F(FilePathWatcherTest, RecursiveWatch) {
ASSERT_TRUE(WaitForEvents());
}
-#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
// Apps cannot create symlinks on Android in /sdcard as /sdcard uses the
// "fuse" file system, while /data uses "ext4". Running these tests in /data
// would be preferable and allow testing file attributes and symlinks.
@@ -585,7 +588,7 @@ TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
ASSERT_TRUE(WriteFile(target2_file, "content"));
ASSERT_TRUE(WaitForEvents());
}
-#endif // defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
TEST_F(FilePathWatcherTest, MoveChild) {
FilePathWatcher file_watcher;
@@ -854,10 +857,7 @@ TEST_F(FilePathWatcherTest, DirAttributesChanged) {
// We should not get notified in this case as it hasn't affected our ability
// to access the file.
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
- loop_.task_runner()->PostDelayedTask(
- FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated(),
- TestTimeouts::tiny_timeout());
- ASSERT_FALSE(WaitForEvents());
+ ASSERT_FALSE(WaitForEventsWithTimeout(TestTimeouts::tiny_timeout()));
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
// We should get notified in this case because filepathwatcher can no
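The test now drives its own RunLoop and hands the quit closure to the collector, replacing RunLoop::QuitCurrentWhenIdleClosureDeprecated(). A condensed sketch of the wait pattern introduced above; NotificationCollector is the test's own helper class:

#include "base/run_loop.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"

bool WaitForAllDelegates(NotificationCollector* collector,
                         base::TimeDelta timeout) {
  base::RunLoop run_loop;
  // The collector runs this closure once every delegate has signaled.
  collector->Reset(run_loop.QuitClosure());
  // Also quit after |timeout| so a missed notification cannot hang the test.
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, run_loop.QuitClosure(), timeout);
  run_loop.Run();
  return collector->Success();
}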
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index 45cef58511c..83018f2f343 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -31,7 +31,7 @@ static_assert(File::FROM_BEGIN == SEEK_SET && File::FROM_CURRENT == SEEK_CUR &&
namespace {
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
- defined(OS_ANDROID) && __ANDROID_API__ < 21
+ defined(OS_FUCHSIA) || (defined(OS_ANDROID) && __ANDROID_API__ < 21)
int CallFstat(int fd, stat_wrapper_t *sb) {
AssertBlockingAllowed();
return fstat(fd, sb);
@@ -395,10 +395,7 @@ File File::Duplicate() const {
if (other_fd == -1)
return File(File::GetLastFileError());
- File other(other_fd);
- if (async())
- other.async_ = true;
- return other;
+ return File(other_fd, async());
}
// Static.
diff --git a/chromium/base/files/file_proxy_unittest.cc b/chromium/base/files/file_proxy_unittest.cc
index 20bb4896b4b..cb689db2f63 100644
--- a/chromium/base/files/file_proxy_unittest.cc
+++ b/chromium/base/files/file_proxy_unittest.cc
@@ -310,8 +310,10 @@ TEST_F(FileProxyTest, WriteAndFlush) {
}
}
-#if defined(OS_ANDROID)
+#if defined(OS_ANDROID) || defined(OS_FUCHSIA)
// Flaky on Android, see http://crbug.com/489602
+// TODO(crbug.com/851734): Implementation depends on stat, which is not
+// implemented on Fuchsia
#define MAYBE_SetTimes DISABLED_SetTimes
#else
#define MAYBE_SetTimes SetTimes
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 65bf62d939d..8a5732280b7 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -745,4 +745,18 @@ TEST(FileTest, NoDeleteOnCloseWithMappedFile) {
file.Close();
ASSERT_TRUE(base::PathExists(file_path));
}
+
+// Check that we handle the async bit being set incorrectly in a sane way.
+TEST(FileTest, UseSyncApiWithAsyncFile) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_WRITE |
+ base::File::FLAG_ASYNC);
+ File lying_file(file.TakePlatformFile(), false /* async */);
+ ASSERT_TRUE(lying_file.IsValid());
+
+ ASSERT_EQ(lying_file.WriteAtCurrentPos("12345", 5), -1);
+}
#endif // defined(OS_WIN)
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index 1ba93681727..456962a42fd 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -235,6 +235,15 @@ BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
const FilePath::StringType& executable);
+#if defined(OS_LINUX) || defined(OS_AIX)
+// Determine if files under a given |path| can be mapped and then mprotect'd
+// PROT_EXEC. This depends on the mount options used for |path|, which vary
+// among different Linux distributions and possibly local configuration. It also
+// depends on kernel details; ChromeOS uses the noexec option for /dev/shm
+// but its kernel allows mprotect with PROT_EXEC anyway.
+BASE_EXPORT bool IsPathExecutable(const FilePath& path);
+#endif // OS_LINUX || OS_AIX
+
#endif // OS_POSIX
// Returns true if the given directory is empty
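DetermineDevShmExecutable() has been generalised into IsPathExecutable(), which probes an arbitrary directory instead of hard-coding /dev/shm. A minimal caller sketch mirroring how GetShmemTempDir() uses it below (Linux/AIX only; function name hypothetical):

#include "base/files/file_path.h"
#include "base/files/file_util.h"

#if defined(OS_LINUX) || defined(OS_AIX)
bool CanUseDevShmForExecutableMappings() {
  // Probes the mount by mapping a temp file and attempting
  // mprotect(PROT_EXEC); cached because the answer cannot change at runtime.
  static const bool executable =
      base::IsPathExecutable(base::FilePath("/dev/shm"));
  return executable;
}
#endif  // defined(OS_LINUX) || defined(OS_AIX)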
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index d8a0ae05e3d..e888c82e078 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -69,7 +69,7 @@ namespace base {
namespace {
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
- defined(OS_ANDROID) && __ANDROID_API__ < 21
+ defined(OS_FUCHSIA) || (defined(OS_ANDROID) && __ANDROID_API__ < 21)
int CallStat(const char* path, stat_wrapper_t* sb) {
AssertBlockingAllowed();
return stat(path, sb);
@@ -138,35 +138,6 @@ std::string TempFileName() {
#endif
}
-#if defined(OS_LINUX) || defined(OS_AIX)
-// Determine if /dev/shm files can be mapped and then mprotect'd PROT_EXEC.
-// This depends on the mount options used for /dev/shm, which vary among
-// different Linux distributions and possibly local configuration. It also
-// depends on details of kernel--ChromeOS uses the noexec option for /dev/shm
-// but its kernel allows mprotect with PROT_EXEC anyway.
-bool DetermineDevShmExecutable() {
- bool result = false;
- FilePath path;
-
- ScopedFD fd(
- CreateAndOpenFdForTemporaryFileInDir(FilePath("/dev/shm"), &path));
- if (fd.is_valid()) {
- DeleteFile(path, false);
- long sysconf_result = sysconf(_SC_PAGESIZE);
- CHECK_GE(sysconf_result, 0);
- size_t pagesize = static_cast<size_t>(sysconf_result);
- CHECK_GE(sizeof(pagesize), sizeof(sysconf_result));
- void* mapping = mmap(nullptr, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
- if (mapping != MAP_FAILED) {
- if (mprotect(mapping, pagesize, PROT_READ | PROT_EXEC) == 0)
- result = true;
- munmap(mapping, pagesize);
- }
- }
- return result;
-}
-#endif // defined(OS_LINUX) || defined(OS_AIX)
-
bool AdvanceEnumeratorWithStat(FileEnumerator* traversal,
FilePath* out_next_path,
struct stat* out_next_stat) {
@@ -1016,7 +987,8 @@ bool GetShmemTempDir(bool executable, FilePath* path) {
#endif
bool use_dev_shm = true;
if (executable) {
- static const bool s_dev_shm_executable = DetermineDevShmExecutable();
+ static const bool s_dev_shm_executable =
+ IsPathExecutable(FilePath("/dev/shm"));
use_dev_shm = s_dev_shm_executable;
}
if (use_dev_shm && !disable_dev_shm) {
@@ -1083,4 +1055,28 @@ bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
} // namespace internal
#endif // !defined(OS_NACL_NONSFI)
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+BASE_EXPORT bool IsPathExecutable(const FilePath& path) {
+ bool result = false;
+ FilePath tmp_file_path;
+
+ ScopedFD fd(CreateAndOpenFdForTemporaryFileInDir(path, &tmp_file_path));
+ if (fd.is_valid()) {
+ DeleteFile(tmp_file_path, false);
+ long sysconf_result = sysconf(_SC_PAGESIZE);
+ CHECK_GE(sysconf_result, 0);
+ size_t pagesize = static_cast<size_t>(sysconf_result);
+ CHECK_GE(sizeof(pagesize), sizeof(sysconf_result));
+ void* mapping = mmap(nullptr, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
+ if (mapping != MAP_FAILED) {
+ if (mprotect(mapping, pagesize, PROT_READ | PROT_EXEC) == 0)
+ result = true;
+ munmap(mapping, pagesize);
+ }
+ }
+ return result;
+}
+#endif // defined(OS_LINUX) || defined(OS_AIX)
+
} // namespace base
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index a89e1b32789..68abc7c52d9 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -4,6 +4,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <stdio.h>
#include <algorithm>
#include <fstream>
@@ -25,6 +26,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/files/scoped_temp_dir.h"
+#include "base/guid.h"
#include "base/macros.h"
#include "base/path_service.h"
#include "base/strings/string_util.h"
@@ -34,6 +36,7 @@
#include "base/test/test_file_util.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
@@ -2419,7 +2422,14 @@ TEST_F(FileUtilTest, CreateAndOpenTemporaryFileTest) {
}
}
-TEST_F(FileUtilTest, FileToFILE) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851747): Re-enable when the Fuchsia-side fix for fdopen has
+// been rolled into Chromium.
+#define MAYBE_FileToFILE DISABLED_FileToFILE
+#else
+#define MAYBE_FileToFILE FileToFILE
+#endif
+TEST_F(FileUtilTest, MAYBE_FileToFILE) {
File file;
FILE* stream = FileToFILE(std::move(file), "w");
EXPECT_FALSE(stream);
@@ -3169,7 +3179,7 @@ TEST_F(FileUtilTest, ReadFileToStringWithNamedPipe) {
}
#endif // defined(OS_WIN)
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX) && !defined(OS_MACOSX)
TEST_F(FileUtilTest, ReadFileToStringWithProcFileSystem) {
FilePath file_path("/proc/cpuinfo");
std::string data = "temp";
@@ -3179,23 +3189,15 @@ TEST_F(FileUtilTest, ReadFileToStringWithProcFileSystem) {
data = "temp";
EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 2));
-#if defined(OS_ANDROID)
- EXPECT_EQ("Pr", data);
-#else
- EXPECT_EQ("pr", data);
-#endif
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("pr", data));
data = "temp";
EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 4));
-#if defined(OS_ANDROID)
- EXPECT_EQ("Proc", data);
-#else
- EXPECT_EQ("proc", data);
-#endif
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("proc", data));
EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, nullptr, 4));
}
-#endif // defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX) && !defined(OS_MACOSX)
TEST_F(FileUtilTest, ReadFileToStringWithLargeFile) {
std::string data(kLargeFileSize, 'c');
@@ -3637,6 +3639,70 @@ TEST_F(FileUtilTest, NonExistentContentUriTest) {
}
#endif
+// Test that temp files obtained racily are all unique (no interference between
+// threads). Mimics file operations in DoLaunchChildTestProcess() to rule out
+// thread-safety issues @ https://crbug.com/826408#c17.
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/844416): Too slow to run on infra due to QEMU overloads.
+#define MAYBE_MultiThreadedTempFiles DISABLED_MultiThreadedTempFiles
+#else
+#define MAYBE_MultiThreadedTempFiles MultiThreadedTempFiles
+#endif
+TEST(FileUtilMultiThreadedTest, MAYBE_MultiThreadedTempFiles) {
+ constexpr int kNumThreads = 64;
+ constexpr int kNumWritesPerThread = 32;
+
+ std::unique_ptr<Thread> threads[kNumThreads];
+ for (auto& thread : threads) {
+ thread = std::make_unique<Thread>("test worker");
+ thread->Start();
+ }
+
+ // Wait until all threads are started for max parallelism.
+ for (auto& thread : threads)
+ thread->WaitUntilThreadStarted();
+
+ const RepeatingClosure open_write_close_read = BindRepeating([]() {
+ FilePath output_filename;
+ ScopedFILE output_file(CreateAndOpenTemporaryFile(&output_filename));
+ EXPECT_TRUE(output_file);
+
+ const std::string content = GenerateGUID();
+#if defined(OS_WIN)
+ HANDLE handle =
+ reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
+ DWORD bytes_written = 0;
+ ::WriteFile(handle, content.c_str(), content.length(), &bytes_written,
+ NULL);
+#else
+ size_t bytes_written =
+ ::write(::fileno(output_file.get()), content.c_str(), content.length());
+#endif
+ EXPECT_EQ(content.length(), bytes_written);
+ ::fflush(output_file.get());
+ output_file.reset();
+
+ std::string output_file_contents;
+ EXPECT_TRUE(ReadFileToString(output_filename, &output_file_contents))
+ << output_filename;
+
+ EXPECT_EQ(content, output_file_contents);
+
+ DeleteFile(output_filename, false);
+ });
+
+ // Post tasks to each thread in a round-robin fashion to ensure as much
+ // parallelism as possible.
+ for (int i = 0; i < kNumWritesPerThread; ++i) {
+ for (auto& thread : threads) {
+ thread->task_runner()->PostTask(FROM_HERE, open_write_close_read);
+ }
+ }
+
+ for (auto& thread : threads)
+ thread->Stop();
+}
+
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
TEST(ScopedFD, ScopedFDDoesClose) {
diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc
index d7bffc3b512..82cd7e8b429 100644
--- a/chromium/base/files/file_win.cc
+++ b/chromium/base/files/file_win.cc
@@ -269,10 +269,7 @@ File File::Duplicate() const {
return File(GetLastFileError());
}
- File other(other_handle);
- if (async())
- other.async_ = true;
- return other;
+ return File(other_handle, async());
}
bool File::DeleteOnClose(bool delete_on_close) {
diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc
index 235bb8d36b0..6342b0e50ae 100644
--- a/chromium/base/files/important_file_writer.cc
+++ b/chromium/base/files/important_file_writer.cc
@@ -308,7 +308,7 @@ void ImportantFileWriter::ClearPendingWrite() {
serializer_ = nullptr;
}
-void ImportantFileWriter::SetTimerForTesting(Timer* timer_override) {
+void ImportantFileWriter::SetTimerForTesting(OneShotTimer* timer_override) {
timer_override_ = timer_override;
}
diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h
index 08a7ee34bee..f0cbfd228e6 100644
--- a/chromium/base/files/important_file_writer.h
+++ b/chromium/base/files/important_file_writer.h
@@ -114,14 +114,13 @@ class BASE_EXPORT ImportantFileWriter {
}
// Overrides the timer to use for scheduling writes with |timer_override|.
- void SetTimerForTesting(Timer* timer_override);
+ void SetTimerForTesting(OneShotTimer* timer_override);
private:
- const Timer& timer() const {
- return timer_override_ ? const_cast<const Timer&>(*timer_override_)
- : timer_;
+ const OneShotTimer& timer() const {
+ return timer_override_ ? *timer_override_ : timer_;
}
- Timer& timer() { return timer_override_ ? *timer_override_ : timer_; }
+ OneShotTimer& timer() { return timer_override_ ? *timer_override_ : timer_; }
void ClearPendingWrite();
@@ -139,7 +138,7 @@ class BASE_EXPORT ImportantFileWriter {
OneShotTimer timer_;
// An override for |timer_| used for testing.
- Timer* timer_override_ = nullptr;
+ OneShotTimer* timer_override_ = nullptr;
// Serializer which will provide the data to be saved.
DataSerializer* serializer_;
diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc
index 493fb36024b..5dddc71456c 100644
--- a/chromium/base/files/important_file_writer_unittest.cc
+++ b/chromium/base/files/important_file_writer_unittest.cc
@@ -16,7 +16,7 @@
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -231,7 +231,7 @@ TEST_F(ImportantFileWriterTest, CallbackRunsOnWriterThread) {
TEST_F(ImportantFileWriterTest, ScheduleWrite) {
constexpr TimeDelta kCommitInterval = TimeDelta::FromSeconds(12345);
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get(),
kCommitInterval);
writer.SetTimerForTesting(&timer);
@@ -250,7 +250,7 @@ TEST_F(ImportantFileWriterTest, ScheduleWrite) {
}
TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
writer.SetTimerForTesting(&timer);
EXPECT_FALSE(writer.HasPendingWrite());
@@ -265,7 +265,7 @@ TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
}
TEST_F(ImportantFileWriterTest, BatchingWrites) {
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
writer.SetTimerForTesting(&timer);
DataSerializer foo("foo"), bar("bar"), baz("baz");
@@ -280,7 +280,7 @@ TEST_F(ImportantFileWriterTest, BatchingWrites) {
}
TEST_F(ImportantFileWriterTest, ScheduleWrite_FailToSerialize) {
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
writer.SetTimerForTesting(&timer);
EXPECT_FALSE(writer.HasPendingWrite());
@@ -295,7 +295,7 @@ TEST_F(ImportantFileWriterTest, ScheduleWrite_FailToSerialize) {
}
TEST_F(ImportantFileWriterTest, ScheduleWrite_WriteNow) {
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
writer.SetTimerForTesting(&timer);
EXPECT_FALSE(writer.HasPendingWrite());
@@ -312,7 +312,7 @@ TEST_F(ImportantFileWriterTest, ScheduleWrite_WriteNow) {
}
TEST_F(ImportantFileWriterTest, DoScheduledWrite_FailToSerialize) {
- MockTimer timer(true, false);
+ MockOneShotTimer timer;
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
writer.SetTimerForTesting(&timer);
EXPECT_FALSE(writer.HasPendingWrite());
diff --git a/chromium/base/fuchsia/async_dispatcher.cc b/chromium/base/fuchsia/async_dispatcher.cc
index 0f344b0687f..b25b9f76c91 100644
--- a/chromium/base/fuchsia/async_dispatcher.cc
+++ b/chromium/base/fuchsia/async_dispatcher.cc
@@ -7,6 +7,8 @@
#include <lib/async/default.h>
#include <lib/async/task.h>
#include <lib/async/wait.h>
+#include <lib/zx/handle.h>
+#include <lib/zx/time.h>
#include <zircon/syscalls.h>
#include "base/fuchsia/fuchsia_logging.h"
@@ -65,21 +67,19 @@ class AsyncDispatcher::TaskState : public LinkNode<TaskState> {
};
AsyncDispatcher::AsyncDispatcher() : ops_storage_({}) {
- zx_status_t status = zx_port_create(0u, port_.receive());
+ zx_status_t status = zx::port::create(0u, &port_);
ZX_DCHECK(status == ZX_OK, status);
- status = zx_timer_create(0u, ZX_CLOCK_MONOTONIC, timer_.receive());
+ status = zx::timer::create(0u, ZX_CLOCK_MONOTONIC, &timer_);
ZX_DCHECK(status == ZX_OK, status);
- status =
- zx_object_wait_async(timer_.get(), port_.get(), key_from_ptr(&timer_),
- ZX_TIMER_SIGNALED, ZX_WAIT_ASYNC_REPEATING);
+ status = timer_.wait_async(port_, key_from_ptr(&timer_), ZX_TIMER_SIGNALED,
+ ZX_WAIT_ASYNC_REPEATING);
ZX_DCHECK(status == ZX_OK, status);
- status = zx_event_create(0, stop_event_.receive());
+ status = zx::event::create(0, &stop_event_);
ZX_DCHECK(status == ZX_OK, status);
- status = zx_object_wait_async(stop_event_.get(), port_.get(),
- key_from_ptr(&stop_event_), ZX_EVENT_SIGNALED,
- ZX_WAIT_ASYNC_REPEATING);
+ status = stop_event_.wait_async(port_, key_from_ptr(&stop_event_),
+ ZX_EVENT_SIGNALED, ZX_WAIT_ASYNC_REPEATING);
ZX_DCHECK(status == ZX_OK, status);
ops_storage_.v1.now = NowOp;
@@ -123,7 +123,7 @@ zx_status_t AsyncDispatcher::DispatchOrWaitUntil(zx_time_t deadline) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
zx_port_packet_t packet = {};
- zx_status_t status = zx_port_wait(port_.get(), deadline, &packet, 1);
+ zx_status_t status = port_.wait(zx::time(deadline), &packet);
if (status != ZX_OK)
return status;
@@ -206,9 +206,10 @@ zx_status_t AsyncDispatcher::BeginWait(async_wait_t* wait) {
static_assert(sizeof(AsyncDispatcher::WaitState) <= sizeof(async_state_t),
"WaitState is too big");
WaitState* state = new (&wait->state) WaitState(this);
- zx_status_t status = zx_object_wait_async(wait->object, port_.get(),
- reinterpret_cast<uintptr_t>(wait),
- wait->trigger, ZX_WAIT_ASYNC_ONCE);
+ zx_status_t status =
+ zx::unowned_handle(wait->object)
+ ->wait_async(port_, reinterpret_cast<uintptr_t>(wait), wait->trigger,
+ ZX_WAIT_ASYNC_ONCE);
if (status != ZX_OK)
state->~WaitState();
@@ -220,7 +221,7 @@ zx_status_t AsyncDispatcher::CancelWait(async_wait_t* wait) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
zx_status_t status =
- zx_port_cancel(port_.get(), wait->object, (uintptr_t)wait);
+ port_.cancel(wait->object, reinterpret_cast<uintptr_t>(wait));
if (status == ZX_OK) {
WaitState* state = reinterpret_cast<WaitState*>(&(wait->state));
state->~WaitState();
@@ -314,7 +315,7 @@ void AsyncDispatcher::RestartTimerLocked() {
if (task_list_.empty())
return;
zx_time_t deadline = task_list_.head()->value()->task()->deadline;
- zx_status_t status = zx_timer_set(timer_.get(), deadline, 0);
+ zx_status_t status = timer_.set(zx::time(deadline), zx::duration());
ZX_DCHECK(status == ZX_OK, status);
}
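The dispatcher now uses the typed libzx wrappers, so handle ownership is carried by the types instead of raw zx_handle_t values. A condensed sketch of the create-and-wait sequence above; error handling is elided, the packet key is arbitrary, and the function name is hypothetical:

#include <lib/zx/port.h>
#include <lib/zx/time.h>
#include <lib/zx/timer.h>
#include <zircon/syscalls.h>

void PortAndTimerSketch() {
  zx::port port;
  zx::timer timer;
  zx_status_t status = zx::port::create(0u, &port);
  status = zx::timer::create(0u, ZX_CLOCK_MONOTONIC, &timer);
  // Route timer signals to the port, keyed by an arbitrary value.
  status = timer.wait_async(port, /*key=*/1u, ZX_TIMER_SIGNALED,
                            ZX_WAIT_ASYNC_REPEATING);
  zx_port_packet_t packet = {};
  status = port.wait(zx::time(ZX_TIME_INFINITE), &packet);
}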
diff --git a/chromium/base/fuchsia/async_dispatcher.h b/chromium/base/fuchsia/async_dispatcher.h
index 3029701e365..e464d308439 100644
--- a/chromium/base/fuchsia/async_dispatcher.h
+++ b/chromium/base/fuchsia/async_dispatcher.h
@@ -6,9 +6,11 @@
#define BASE_FUCHSIA_ASYNC_DISPATCHER_H_
#include <lib/async/dispatcher.h>
+#include <lib/zx/event.h>
+#include <lib/zx/port.h>
+#include <lib/zx/timer.h>
#include "base/containers/linked_list.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
@@ -64,9 +66,9 @@ class BASE_EXPORT AsyncDispatcher : public async_t {
THREAD_CHECKER(thread_checker_);
- ScopedZxHandle port_;
- ScopedZxHandle timer_;
- ScopedZxHandle stop_event_;
+ zx::port port_;
+ zx::timer timer_;
+ zx::event stop_event_;
LinkedList<WaitState> wait_list_;
diff --git a/chromium/base/fuchsia/async_dispatcher_unittest.cc b/chromium/base/fuchsia/async_dispatcher_unittest.cc
index 7f1722f4fe4..351bbedbffc 100644
--- a/chromium/base/fuchsia/async_dispatcher_unittest.cc
+++ b/chromium/base/fuchsia/async_dispatcher_unittest.cc
@@ -7,9 +7,9 @@
#include <lib/async/default.h>
#include <lib/async/task.h>
#include <lib/async/wait.h>
+#include <lib/zx/socket.h>
#include "base/callback.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -98,8 +98,7 @@ class AsyncDispatcherTest : public testing::Test {
async_ = async_get_default();
EXPECT_TRUE(async_);
- EXPECT_EQ(zx_socket_create(ZX_SOCKET_DATAGRAM, socket1_.receive(),
- socket2_.receive()),
+ EXPECT_EQ(zx::socket::create(ZX_SOCKET_DATAGRAM, &socket1_, &socket2_),
ZX_OK);
}
@@ -120,8 +119,8 @@ class AsyncDispatcherTest : public testing::Test {
async_t* async_ = nullptr;
- base::ScopedZxHandle socket1_;
- base::ScopedZxHandle socket2_;
+ zx::socket socket1_;
+ zx::socket socket2_;
};
TEST_F(AsyncDispatcherTest, PostTask) {
@@ -179,8 +178,8 @@ TEST_F(AsyncDispatcherTest, Wait) {
EXPECT_EQ(wait.num_calls, 0);
char byte = 0;
- EXPECT_EQ(zx_socket_write(socket2_.get(), /*options=*/0, &byte, sizeof(byte),
- /*actual=*/nullptr),
+ EXPECT_EQ(socket2_.write(/*options=*/0, &byte, sizeof(byte),
+ /*actual=*/nullptr),
ZX_OK);
zx_status_t status = dispatcher_->DispatchOrWaitUntil(
@@ -196,8 +195,8 @@ TEST_F(AsyncDispatcherTest, CancelWait) {
EXPECT_EQ(async_begin_wait(async_, &wait), ZX_OK);
char byte = 0;
- EXPECT_EQ(zx_socket_write(socket2_.get(), /*options=*/0, &byte, sizeof(byte),
- /*actual=*/nullptr),
+ EXPECT_EQ(socket2_.write(/*options=*/0, &byte, sizeof(byte),
+ /*actual=*/nullptr),
ZX_OK);
EXPECT_EQ(async_cancel_wait(async_, &wait), ZX_OK);
diff --git a/chromium/base/fuchsia/component_context.cc b/chromium/base/fuchsia/component_context.cc
index 5cfb8e8666b..a6d8f5bd600 100644
--- a/chromium/base/fuchsia/component_context.cc
+++ b/chromium/base/fuchsia/component_context.cc
@@ -4,10 +4,11 @@
#include "base/fuchsia/component_context.h"
-#include <fdio/util.h>
+#include <lib/fdio/util.h>
+#include <lib/zx/channel.h>
+#include <utility>
-#include "base/fuchsia/scoped_zx_handle.h"
-#include "base/fuchsia/services_directory.h"
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/no_destructor.h"
namespace base {
@@ -16,19 +17,19 @@ namespace fuchsia {
namespace {
// static
-ScopedZxHandle ConnectToServiceRoot() {
- ScopedZxHandle h1;
- ScopedZxHandle h2;
- zx_status_t result = zx_channel_create(0, h1.receive(), h2.receive());
+zx::channel ConnectToServiceRoot() {
+ zx::channel client_channel;
+ zx::channel server_channel;
+ zx_status_t result = zx::channel::create(0, &client_channel, &server_channel);
ZX_CHECK(result == ZX_OK, result) << "zx_channel_create()";
- result = fdio_service_connect("/svc/.", h1.release());
+ result = fdio_service_connect("/svc/.", server_channel.release());
ZX_CHECK(result == ZX_OK, result) << "Failed to open /svc";
- return h2;
+ return client_channel;
}
} // namespace
-ComponentContext::ComponentContext(ScopedZxHandle service_root)
+ComponentContext::ComponentContext(zx::channel service_root)
: service_root_(std::move(service_root)) {
DCHECK(service_root_);
}
@@ -42,13 +43,11 @@ ComponentContext* ComponentContext::GetDefault() {
return component_context.get();
}
-void ComponentContext::ConnectToService(FidlInterfaceRequest request) {
+zx_status_t ComponentContext::ConnectToService(FidlInterfaceRequest request) {
DCHECK(request.is_valid());
- zx_status_t result =
- fdio_service_connect_at(service_root_.get(), request.interface_name(),
- request.TakeChannel().release());
- ZX_CHECK(result == ZX_OK, result) << "fdio_service_connect_at()";
+ return fdio_service_connect_at(service_root_.get(), request.interface_name(),
+ request.TakeChannel().release());
}
} // namespace fuchsia
-} // namespace base \ No newline at end of file
+} // namespace base
diff --git a/chromium/base/fuchsia/component_context.h b/chromium/base/fuchsia/component_context.h
index 36cf48779a6..d7ff9ddb8ad 100644
--- a/chromium/base/fuchsia/component_context.h
+++ b/chromium/base/fuchsia/component_context.h
@@ -5,9 +5,10 @@
#ifndef BASE_FUCHSIA_COMPONENT_CONTEXT_H_
#define BASE_FUCHSIA_COMPONENT_CONTEXT_H_
+#include <lib/zx/channel.h>
+
#include "base/base_export.h"
#include "base/fuchsia/fidl_interface_request.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
@@ -27,7 +28,7 @@ namespace fuchsia {
// Provides access to the component's environment.
class BASE_EXPORT ComponentContext {
public:
- explicit ComponentContext(ScopedZxHandle service_root);
+ explicit ComponentContext(zx::channel service_root);
~ComponentContext();
// Returns default ComponentContext instance for the current process. It uses
@@ -35,7 +36,7 @@ class BASE_EXPORT ComponentContext {
static ComponentContext* GetDefault();
// Satisfies the interface |request| by binding the channel to a service.
- void ConnectToService(FidlInterfaceRequest request);
+ zx_status_t ConnectToService(FidlInterfaceRequest request);
// Same as above, but returns interface pointer instead of taking a request.
template <typename Interface>
@@ -55,7 +56,7 @@ class BASE_EXPORT ComponentContext {
}
private:
- ScopedZxHandle service_root_;
+ zx::channel service_root_;
DISALLOW_COPY_AND_ASSIGN(ComponentContext);
};
@@ -63,4 +64,4 @@ class BASE_EXPORT ComponentContext {
} // namespace fuchsia
} // namespace base
-#endif // BASE_FUCHSIA_COMPONENT_CONTEXT_H_ \ No newline at end of file
+#endif // BASE_FUCHSIA_COMPONENT_CONTEXT_H_
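ConnectToService() now returns the fdio_service_connect_at() status instead of CHECK-failing, so callers can react to a missing service. A minimal sketch of the new call pattern, written as if inside the base::fuchsia namespace and using the test_fidl::TestInterface stub from the tests below as a stand-in for a real FIDL interface:

fidl::InterfacePtr<test_fidl::TestInterface> stub;
zx_status_t status = ComponentContext::GetDefault()->ConnectToService(
    FidlInterfaceRequest(&stub));
if (status != ZX_OK)
  ZX_DLOG(ERROR, status) << "ConnectToService";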
diff --git a/chromium/base/fuchsia/default_job.cc b/chromium/base/fuchsia/default_job.cc
index 366b14f6575..c26aeb34afc 100644
--- a/chromium/base/fuchsia/default_job.cc
+++ b/chromium/base/fuchsia/default_job.cc
@@ -4,7 +4,7 @@
#include "base/fuchsia/default_job.h"
-#include <zircon/process.h>
+#include <zircon/types.h>
#include "base/logging.h"
@@ -12,16 +12,16 @@ namespace base {
namespace {
zx_handle_t g_job = ZX_HANDLE_INVALID;
-} // namespace
+}
-zx_handle_t GetDefaultJob() {
+zx::unowned_job GetDefaultJob() {
if (g_job == ZX_HANDLE_INVALID)
- return zx_job_default();
- return g_job;
+ return zx::job::default_job();
+ return zx::unowned_job(g_job);
}
-void SetDefaultJob(ScopedZxHandle job) {
- DCHECK_EQ(ZX_HANDLE_INVALID, g_job);
+void SetDefaultJob(zx::job job) {
+ DCHECK_EQ(g_job, ZX_HANDLE_INVALID);
g_job = job.release();
}
diff --git a/chromium/base/fuchsia/default_job.h b/chromium/base/fuchsia/default_job.h
index f5f5c3ad21b..9417f1c3242 100644
--- a/chromium/base/fuchsia/default_job.h
+++ b/chromium/base/fuchsia/default_job.h
@@ -5,17 +5,18 @@
#ifndef BASE_FUCHSIA_DEFAULT_JOB_H_
#define BASE_FUCHSIA_DEFAULT_JOB_H_
+#include <lib/zx/job.h>
+
#include "base/base_export.h"
-#include "base/fuchsia/scoped_zx_handle.h"
namespace base {
// Gets and sets the job object used for creating new child processes,
// and looking them up by their process IDs.
-// zx_job_default() will be returned if no job is explicitly set here.
+// zx::job::default_job() will be returned if no job is explicitly set here.
// Only valid handles may be passed to SetDefaultJob().
-BASE_EXPORT zx_handle_t GetDefaultJob();
-BASE_EXPORT void SetDefaultJob(ScopedZxHandle job);
+BASE_EXPORT zx::unowned_job GetDefaultJob();
+BASE_EXPORT void SetDefaultJob(zx::job job);
} // namespace base
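With the libzx types, ownership is expressed in the signatures: SetDefaultJob() takes a zx::job and consumes it exactly once, while GetDefaultJob() hands out a zx::unowned_job that the caller must not close. A rough sketch, written as if inside namespace base; the zx::job::create() call that derives a child from the current default job is illustrative only:

zx::job new_default;
zx_status_t status = zx::job::create(*GetDefaultJob(), 0, &new_default);
ZX_CHECK(status == ZX_OK, status);
SetDefaultJob(std::move(new_default));   // Ownership transferred once.
zx::unowned_job job = GetDefaultJob();   // Borrowed; must not be closed.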
diff --git a/chromium/base/fuchsia/fidl_interface_request.cc b/chromium/base/fuchsia/fidl_interface_request.cc
index 34d6d32d29a..f0eaa1e551e 100644
--- a/chromium/base/fuchsia/fidl_interface_request.cc
+++ b/chromium/base/fuchsia/fidl_interface_request.cc
@@ -11,14 +11,21 @@ FidlInterfaceRequest::FidlInterfaceRequest(FidlInterfaceRequest&& moved) =
default;
FidlInterfaceRequest::FidlInterfaceRequest(const char* interface_name,
- ScopedZxHandle channel)
+ zx::channel channel)
: interface_name_(interface_name), channel_(std::move(channel)) {}
FidlInterfaceRequest::~FidlInterfaceRequest() = default;
-ScopedZxHandle FidlInterfaceRequest::TakeChannel() {
+// static
+FidlInterfaceRequest FidlInterfaceRequest::CreateFromChannelUnsafe(
+ const char* interface_name,
+ zx::channel channel) {
+ return FidlInterfaceRequest(interface_name, std::move(channel));
+}
+
+zx::channel FidlInterfaceRequest::TakeChannel() {
DCHECK(channel_);
return std::move(channel_);
}
} // namespace fuchsia
-} // namespace base \ No newline at end of file
+} // namespace base
diff --git a/chromium/base/fuchsia/fidl_interface_request.h b/chromium/base/fuchsia/fidl_interface_request.h
index 176b4e21103..b3dc3019400 100644
--- a/chromium/base/fuchsia/fidl_interface_request.h
+++ b/chromium/base/fuchsia/fidl_interface_request.h
@@ -5,8 +5,9 @@
#ifndef BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_H_
#define BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_H_
+#include <lib/zx/channel.h>
+
#include "base/base_export.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
@@ -35,9 +36,7 @@ class BASE_EXPORT FidlInterfaceRequest {
public:
template <typename Interface>
explicit FidlInterfaceRequest(fidl::InterfaceRequest<Interface> request)
- : FidlInterfaceRequest(
- Interface::Name_,
- ScopedZxHandle::FromZxChannel(request.TakeChannel())) {}
+ : FidlInterfaceRequest(Interface::Name_, request.TakeChannel()) {}
// Creates a new request for |Interface| and binds the client end to the
// |stub|. |stub| can be used immediately after the request is created, even
@@ -53,6 +52,13 @@ class BASE_EXPORT FidlInterfaceRequest {
FidlInterfaceRequest(FidlInterfaceRequest&&);
~FidlInterfaceRequest();
+  // Creates an interface request from the specified |channel|. The caller must
+  // ensure that |interface_name| is valid for that channel.
+ static FidlInterfaceRequest CreateFromChannelUnsafe(
+ const char* interface_name,
+ zx::channel channel);
+
bool is_valid() const { return interface_name_ && channel_; }
const char* interface_name() const { return interface_name_; }
@@ -60,13 +66,13 @@ class BASE_EXPORT FidlInterfaceRequest {
// Extracts the channel handle to be passed to service implementation. The
// request becomes invalid after this call, i.e. TakeChannel() can be called
// only once.
- ScopedZxHandle TakeChannel();
+ zx::channel TakeChannel();
private:
- FidlInterfaceRequest(const char* interface_name, ScopedZxHandle channel);
+ FidlInterfaceRequest(const char* interface_name, zx::channel channel);
const char* interface_name_;
- ScopedZxHandle channel_;
+ zx::channel channel_;
DISALLOW_COPY_AND_ASSIGN(FidlInterfaceRequest);
};
@@ -74,4 +80,4 @@ class BASE_EXPORT FidlInterfaceRequest {
} // namespace fuchsia
} // namespace base
-#endif // BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_H_ \ No newline at end of file
+#endif // BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_H_
diff --git a/chromium/base/fuchsia/file_utils.cc b/chromium/base/fuchsia/file_utils.cc
new file mode 100644
index 00000000000..92bd854d8a7
--- /dev/null
+++ b/chromium/base/fuchsia/file_utils.cc
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/file_utils.h"
+
+#include <lib/fdio/limits.h>
+#include <lib/fdio/util.h>
+#include <zircon/processargs.h>
+#include <utility>
+
+#include "base/files/file.h"
+#include "base/fuchsia/fuchsia_logging.h"
+
+namespace base {
+namespace fuchsia {
+
+zx::handle GetHandleFromFile(File file) {
+ // Unwrap the FD into |handles|. Negative result indicates failure.
+ zx_handle_t handles[FDIO_MAX_HANDLES] = {};
+ uint32_t types[FDIO_MAX_HANDLES] = {};
+ zx_status_t num_handles =
+ fdio_transfer_fd(file.GetPlatformFile(), 0, handles, types);
+ if (num_handles <= 0) {
+ DCHECK_LT(num_handles, 0);
+ ZX_DLOG(ERROR, num_handles) << "fdio_transfer_fd";
+ return zx::handle();
+ }
+
+  // On success, fdio_transfer_fd() has torn down the file descriptor.
+ ignore_result(file.TakePlatformFile());
+
+ // Wrap the returned handles, so they will be closed on error.
+ zx::handle owned_handles[FDIO_MAX_HANDLES];
+ for (int i = 0; i < FDIO_MAX_HANDLES; ++i)
+ owned_handles[i] = zx::handle(handles[i]);
+
+ // We expect a single handle, of type PA_FDIO_REMOTE.
+ if (num_handles != 1 || types[0] != PA_FDIO_REMOTE) {
+ DLOG(ERROR) << "Specified file has " << num_handles
+ << " handles, and type: " << types[0];
+ return zx::handle();
+ }
+
+ return std::move(owned_handles[0]);
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/file_utils.h b/chromium/base/fuchsia/file_utils.h
new file mode 100644
index 00000000000..ee091acac65
--- /dev/null
+++ b/chromium/base/fuchsia/file_utils.h
@@ -0,0 +1,25 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_FILE_UTILS_H_
+#define BASE_FUCHSIA_FILE_UTILS_H_
+
+#include <lib/zx/handle.h>
+
+#include "base/base_export.h"
+
+namespace base {
+
+class File;
+
+namespace fuchsia {
+
+// Gets a Zircon handle from the specified |file|, which is consumed. Returns
+// a null handle on failure.
+BASE_EXPORT zx::handle GetHandleFromFile(base::File file);
+
+} // namespace fuchsia
+} // namespace base
+
+#endif // BASE_FUCHSIA_FILE_UTILS_H_
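GetHandleFromFile() takes the base::File by value and consumes its descriptor, returning a null handle if the file cannot be transferred. A brief sketch, written as if inside the base::fuchsia namespace, with an illustrative path:

File file(FilePath("/data/example.txt"), File::FLAG_OPEN | File::FLAG_READ);
zx::handle handle = GetHandleFromFile(std::move(file));
if (!handle.is_valid())
  LOG(ERROR) << "Could not transfer the file into a handle.";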
diff --git a/chromium/base/fuchsia/filtered_service_directory.cc b/chromium/base/fuchsia/filtered_service_directory.cc
new file mode 100644
index 00000000000..af231759125
--- /dev/null
+++ b/chromium/base/fuchsia/filtered_service_directory.cc
@@ -0,0 +1,63 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/filtered_service_directory.h"
+
+#include <lib/fdio/util.h>
+#include <lib/zx/channel.h>
+
+#include "base/bind.h"
+#include "base/fuchsia/component_context.h"
+#include "base/fuchsia/fuchsia_logging.h"
+
+namespace base {
+namespace fuchsia {
+
+FilteredServiceDirectory::FilteredServiceDirectory(
+ ComponentContext* component_context)
+ : component_context_(component_context) {
+ zx::channel server_channel;
+ zx_status_t status =
+ zx::channel::create(0, &server_channel, &directory_client_channel_);
+ ZX_CHECK(status == ZX_OK, status) << "zx_channel_create()";
+
+ service_directory_ =
+ std::make_unique<ServiceDirectory>(std::move(server_channel));
+}
+
+FilteredServiceDirectory::~FilteredServiceDirectory() {
+ service_directory_->RemoveAllServices();
+}
+
+void FilteredServiceDirectory::AddService(const char* service_name) {
+ service_directory_->AddService(
+ service_name,
+ base::BindRepeating(&FilteredServiceDirectory::HandleRequest,
+ base::Unretained(this), service_name));
+}
+
+zx::channel FilteredServiceDirectory::ConnectClient() {
+ zx::channel server_channel;
+ zx::channel client_channel;
+ zx_status_t status = zx::channel::create(0, &server_channel, &client_channel);
+ ZX_CHECK(status == ZX_OK, status) << "zx_channel_create()";
+
+  // ServiceDirectory puts public services under ./public. Connect to that
+  // directory and return the client handle for the connection.
+ status = fdio_service_connect_at(directory_client_channel_.get(), "public",
+ server_channel.release());
+ ZX_CHECK(status == ZX_OK, status) << "fdio_service_connect_at()";
+
+ return client_channel;
+}
+
+void FilteredServiceDirectory::HandleRequest(const char* service_name,
+ zx::channel channel) {
+ component_context_->ConnectToService(
+ FidlInterfaceRequest::CreateFromChannelUnsafe(service_name,
+ std::move(channel)));
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/filtered_service_directory.h b/chromium/base/fuchsia/filtered_service_directory.h
new file mode 100644
index 00000000000..0c4bf25eea7
--- /dev/null
+++ b/chromium/base/fuchsia/filtered_service_directory.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_FILTERED_SERVICE_DIRECTORY_H_
+#define BASE_FUCHSIA_FILTERED_SERVICE_DIRECTORY_H_
+
+#include "base/fuchsia/service_directory.h"
+
+#include <lib/zx/channel.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace fuchsia {
+
+class ComponentContext;
+
+// ServiceDirectory that uses the supplied ComponentContext to satisfy requests
+// for only a restricted set of services.
+class BASE_EXPORT FilteredServiceDirectory {
+ public:
+  // Creates a proxy that forwards requests to the specified
+  // |component_context|, which must outlive the proxy.
+ explicit FilteredServiceDirectory(ComponentContext* component_context);
+ ~FilteredServiceDirectory();
+
+ // Adds the specified service to the list of whitelisted services.
+ void AddService(const char* service_name);
+
+  // Returns a client channel connected to the directory. The returned channel
+  // can be passed to a sandboxed process to be used as its /svc namespace.
+ zx::channel ConnectClient();
+
+ private:
+ void HandleRequest(const char* service_name, zx::channel channel);
+
+ ComponentContext* const component_context_;
+ std::unique_ptr<ServiceDirectory> service_directory_;
+
+ // Client side of the channel used by |service_directory_|.
+ zx::channel directory_client_channel_;
+
+ DISALLOW_COPY_AND_ASSIGN(FilteredServiceDirectory);
+};
+
+} // namespace fuchsia
+} // namespace base
+
+#endif // BASE_FUCHSIA_FILTERED_SERVICE_DIRECTORY_H_
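Taken together, the intended pattern is for a sandbox host to whitelist the services it is willing to forward and hand the client channel to the child as its /svc directory. A hedged sketch, written as if inside the base::fuchsia namespace and reusing test_fidl::TestInterface from the tests below as the whitelisted service:

FilteredServiceDirectory filtered_directory(ComponentContext::GetDefault());
filtered_directory.AddService(test_fidl::TestInterface::Name_);

// Handed to the sandboxed process to serve as its /svc directory.
zx::channel svc_channel = filtered_directory.ConnectClient();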
diff --git a/chromium/base/fuchsia/filtered_service_directory_unittest.cc b/chromium/base/fuchsia/filtered_service_directory_unittest.cc
new file mode 100644
index 00000000000..028281851b5
--- /dev/null
+++ b/chromium/base/fuchsia/filtered_service_directory_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/filtered_service_directory.h"
+
+#include <lib/zx/channel.h>
+#include <utility>
+
+#include "base/fuchsia/service_directory_test_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace fuchsia {
+
+class FilteredServiceDirectoryTest : public ServiceDirectoryTestBase {
+ public:
+ FilteredServiceDirectoryTest() {
+ filtered_service_dir_ =
+ std::make_unique<FilteredServiceDirectory>(client_context_.get());
+ filtered_client_context_ = std::make_unique<ComponentContext>(
+ filtered_service_dir_->ConnectClient());
+ }
+
+ protected:
+ std::unique_ptr<FilteredServiceDirectory> filtered_service_dir_;
+ std::unique_ptr<ComponentContext> filtered_client_context_;
+};
+
+// Verify that we can connect to a whitelisted service.
+TEST_F(FilteredServiceDirectoryTest, Connect) {
+ filtered_service_dir_->AddService(test_fidl::TestInterface::Name_);
+
+ auto stub =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, false);
+}
+
+// Verify that multiple connections to the same service work properly.
+TEST_F(FilteredServiceDirectoryTest, ConnectMultiple) {
+ filtered_service_dir_->AddService(test_fidl::TestInterface::Name_);
+
+ auto stub1 =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ auto stub2 =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub1, false);
+ VerifyTestInterface(&stub2, false);
+}
+
+// Verify that non-whitelisted services are blocked.
+TEST_F(FilteredServiceDirectoryTest, ServiceBlocked) {
+ auto stub =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, true);
+}
+
+// Verify that FilteredServiceDirectory handles the case when the target service
+// is not available in the underlying service directory.
+TEST_F(FilteredServiceDirectoryTest, NoService) {
+ filtered_service_dir_->AddService(test_fidl::TestInterface::Name_);
+
+ service_binding_.reset();
+
+ auto stub =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, true);
+}
+
+// Verify that FilteredServiceDirectory handles the case when the underlying
+// service directory is destroyed.
+TEST_F(FilteredServiceDirectoryTest, NoServiceDir) {
+ filtered_service_dir_->AddService(test_fidl::TestInterface::Name_);
+
+ service_binding_.reset();
+ service_directory_.reset();
+
+ auto stub =
+ filtered_client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, true);
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/scoped_service_binding.h b/chromium/base/fuchsia/scoped_service_binding.h
index 0fbcbe6684f..3acabde7a36 100644
--- a/chromium/base/fuchsia/scoped_service_binding.h
+++ b/chromium/base/fuchsia/scoped_service_binding.h
@@ -5,22 +5,20 @@
#ifndef BASE_FUCHSIA_SCOPED_SERVICE_BINDING_H_
#define BASE_FUCHSIA_SCOPED_SERVICE_BINDING_H_
-#include <lib/fidl/cpp/binding.h>
+#include <lib/fidl/cpp/binding_set.h>
#include "base/bind.h"
-#include "base/fuchsia/services_directory.h"
+#include "base/fuchsia/service_directory.h"
namespace base {
namespace fuchsia {
-class ServicesDirectory;
-
template <typename Interface>
class ScopedServiceBinding {
public:
- // |services_directory| and |impl| must outlive the binding.
- ScopedServiceBinding(ServicesDirectory* services_directory, Interface* impl)
- : directory_(services_directory), binding_(impl) {
+ // |service_directory| and |impl| must outlive the binding.
+ ScopedServiceBinding(ServiceDirectory* service_directory, Interface* impl)
+ : directory_(service_directory), impl_(impl) {
directory_->AddService(
Interface::Name_,
BindRepeating(&ScopedServiceBinding::BindClient, Unretained(this)));
@@ -28,14 +26,27 @@ class ScopedServiceBinding {
~ScopedServiceBinding() { directory_->RemoveService(Interface::Name_); }
+ void SetOnLastClientCallback(base::OnceClosure on_last_client_callback) {
+ on_last_client_callback_ = std::move(on_last_client_callback);
+ bindings_.set_empty_set_handler(
+ fit::bind_member(this, &ScopedServiceBinding::OnBindingSetEmpty));
+ }
+
private:
- void BindClient(ScopedZxHandle channel) {
- binding_.Bind(typename fidl::InterfaceRequest<Interface>(
- zx::channel(channel.release())));
+ void BindClient(zx::channel channel) {
+ bindings_.AddBinding(impl_,
+ fidl::InterfaceRequest<Interface>(std::move(channel)));
+ }
+
+ void OnBindingSetEmpty() {
+ bindings_.set_empty_set_handler(nullptr);
+ std::move(on_last_client_callback_).Run();
}
- ServicesDirectory* directory_;
- fidl::Binding<Interface> binding_;
+ ServiceDirectory* const directory_;
+ Interface* const impl_;
+ fidl::BindingSet<Interface> bindings_;
+ base::OnceClosure on_last_client_callback_;
DISALLOW_COPY_AND_ASSIGN(ScopedServiceBinding);
};
@@ -43,4 +54,4 @@ class ScopedServiceBinding {
} // namespace fuchsia
} // namespace base
-#endif // BASE_FUCHSIA_SCOPED_SERVICE_BINDING_H_ \ No newline at end of file
+#endif // BASE_FUCHSIA_SCOPED_SERVICE_BINDING_H_
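Since the binding is now backed by a fidl::BindingSet, one ScopedServiceBinding instance can serve any number of clients, and SetOnLastClientCallback() fires when the set becomes empty. A sketch mirroring the ConnectDisconnect test further below, written as if inside the base::fuchsia namespace; TestInterfaceImpl is the test implementation from service_directory_test_base.h:

TestInterfaceImpl impl;
ScopedServiceBinding<test_fidl::TestInterface> binding(
    ServiceDirectory::GetDefault(), &impl);

base::RunLoop run_loop;
binding.SetOnLastClientCallback(run_loop.QuitClosure());
run_loop.Run();  // Returns once the last connected client goes away.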
diff --git a/chromium/base/fuchsia/scoped_zx_handle.cc b/chromium/base/fuchsia/scoped_zx_handle.cc
deleted file mode 100644
index 7379b2fa440..00000000000
--- a/chromium/base/fuchsia/scoped_zx_handle.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/fuchsia/scoped_zx_handle.h"
-
-#include <lib/zx/channel.h>
-
-namespace base {
-
-// static
-ScopedZxHandle ScopedZxHandle::FromZxChannel(zx::channel channel) {
- return ScopedZxHandle(channel.release());
-}
-
-} // namespace base
diff --git a/chromium/base/fuchsia/scoped_zx_handle.h b/chromium/base/fuchsia/scoped_zx_handle.h
index 8b9838a2f22..33cf5124df7 100644
--- a/chromium/base/fuchsia/scoped_zx_handle.h
+++ b/chromium/base/fuchsia/scoped_zx_handle.h
@@ -5,42 +5,14 @@
#ifndef BASE_FUCHSIA_SCOPED_ZX_HANDLE_H_
#define BASE_FUCHSIA_SCOPED_ZX_HANDLE_H_
-#include <zircon/status.h>
-#include <zircon/syscalls.h>
-
-#include "base/base_export.h"
-#include "base/fuchsia/fuchsia_logging.h"
-#include "base/scoped_generic.h"
-
-namespace zx {
-class channel;
-}
+#include <lib/zx/handle.h>
namespace base {
-namespace internal {
-
-struct ScopedZxHandleTraits {
- static zx_handle_t InvalidValue() { return ZX_HANDLE_INVALID; }
- static void Free(zx_handle_t object) {
- zx_status_t status = zx_handle_close(object);
- ZX_CHECK(status == ZX_OK, status) << "zx_handle_close";
- }
-};
-
-} // namespace internal
-
-class BASE_EXPORT ScopedZxHandle
- : public ScopedGeneric<zx_handle_t, internal::ScopedZxHandleTraits> {
- public:
- ScopedZxHandle() = default;
- explicit ScopedZxHandle(zx_handle_t value) : ScopedGeneric(value) {}
-
- explicit operator bool() const { return get() != ZX_HANDLE_INVALID; }
-
- // Helper to converts zx::channel to ScopedZxHandle.
- static ScopedZxHandle FromZxChannel(zx::channel channel);
-};
+// TODO(852541): Temporary shim to implement the old ScopedGeneric based
+// container as a native zx::handle. Remove this once all callers have been
+// migrated to use the libzx containers.
+using ScopedZxHandle = zx::handle;
} // namespace base
diff --git a/chromium/base/fuchsia/services_directory.cc b/chromium/base/fuchsia/service_directory.cc
index 56dbd59c066..32eaaa51d99 100644
--- a/chromium/base/fuchsia/services_directory.cc
+++ b/chromium/base/fuchsia/service_directory.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/fuchsia/services_directory.h"
+#include "base/fuchsia/service_directory.h"
#include <lib/async/default.h>
#include <lib/svc/dir.h>
+#include <lib/zx/channel.h>
#include <zircon/process.h>
#include <zircon/processargs.h>
@@ -16,13 +17,13 @@
namespace base {
namespace fuchsia {
-ServicesDirectory::ServicesDirectory(ScopedZxHandle directory_request) {
+ServiceDirectory::ServiceDirectory(zx::channel directory_request) {
zx_status_t status = svc_dir_create(async_get_default(),
directory_request.release(), &svc_dir_);
ZX_CHECK(status == ZX_OK, status);
}
-ServicesDirectory::~ServicesDirectory() {
+ServiceDirectory::~ServiceDirectory() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(services_.empty());
@@ -31,26 +32,32 @@ ServicesDirectory::~ServicesDirectory() {
}
// static
-ServicesDirectory* ServicesDirectory::GetDefault() {
- static base::NoDestructor<ServicesDirectory> directory(
- ScopedZxHandle(zx_get_startup_handle(PA_DIRECTORY_REQUEST)));
+ServiceDirectory* ServiceDirectory::GetDefault() {
+ static base::NoDestructor<ServiceDirectory> directory(
+ zx::channel(zx_take_startup_handle(PA_DIRECTORY_REQUEST)));
return directory.get();
}
-void ServicesDirectory::AddService(StringPiece name,
- ConnectServiceCallback connect_callback) {
+void ServiceDirectory::AddService(StringPiece name,
+ ConnectServiceCallback connect_callback) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(services_.find(name) == services_.end());
std::string name_str = name.as_string();
services_[name_str] = connect_callback;
+
zx_status_t status =
svc_dir_add_service(svc_dir_, "public", name_str.c_str(), this,
- &ServicesDirectory::HandleConnectRequest);
+ &ServiceDirectory::HandleConnectRequest);
+ ZX_DCHECK(status == ZX_OK, status);
+
+ // Publish to the legacy "flat" namespace, which is required by some clients.
+ status = svc_dir_add_service(svc_dir_, nullptr, name_str.c_str(), this,
+ &ServiceDirectory::HandleConnectRequest);
ZX_DCHECK(status == ZX_OK, status);
}
-void ServicesDirectory::RemoveService(StringPiece name) {
+void ServiceDirectory::RemoveService(StringPiece name) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
std::string name_str = name.as_string();
@@ -62,13 +69,23 @@ void ServicesDirectory::RemoveService(StringPiece name) {
zx_status_t status =
svc_dir_remove_service(svc_dir_, "public", name_str.c_str());
ZX_DCHECK(status == ZX_OK, status);
+
+ // Unregister from the legacy "flat" namespace.
+ status = svc_dir_remove_service(svc_dir_, nullptr, name_str.c_str());
+ ZX_DCHECK(status == ZX_OK, status);
+}
+
+void ServiceDirectory::RemoveAllServices() {
+ while (!services_.empty()) {
+ RemoveService(services_.begin()->first);
+ }
}
// static
-void ServicesDirectory::HandleConnectRequest(void* context,
- const char* service_name,
- zx_handle_t service_request) {
- auto* directory = reinterpret_cast<ServicesDirectory*>(context);
+void ServiceDirectory::HandleConnectRequest(void* context,
+ const char* service_name,
+ zx_handle_t service_request) {
+ auto* directory = reinterpret_cast<ServiceDirectory*>(context);
DCHECK_CALLED_ON_VALID_THREAD(directory->thread_checker_);
auto it = directory->services_.find(service_name);
@@ -77,7 +94,7 @@ void ServicesDirectory::HandleConnectRequest(void* context,
// services.
DCHECK(it != directory->services_.end());
- it->second.Run(ScopedZxHandle(service_request));
+ it->second.Run(zx::channel(service_request));
}
} // namespace fuchsia
diff --git a/chromium/base/fuchsia/services_directory.h b/chromium/base/fuchsia/service_directory.h
index 4322ffd87ee..f02c1d7e76e 100644
--- a/chromium/base/fuchsia/services_directory.h
+++ b/chromium/base/fuchsia/service_directory.h
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_FUCHSIA_SERVICES_DIRECTORY_H_
-#define BASE_FUCHSIA_SERVICES_DIRECTORY_H_
+#ifndef BASE_FUCHSIA_SERVICE_DIRECTORY_H_
+#define BASE_FUCHSIA_SERVICE_DIRECTORY_H_
+
+#include <lib/zx/channel.h>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/flat_map.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/threading/thread_checker.h"
@@ -23,29 +24,30 @@ namespace fuchsia {
// Normally this class should be used by creating a ScopedServiceBinding
// instance. This ensures that the service is unregistered when the
// implementation is destroyed. GetDefault() should be used to get the default
-// ServicesDirectory for the current process. The default instance exports
+// ServiceDirectory for the current process. The default instance exports
// services via a channel supplied at process creation time.
//
// Not thread-safe. All methods must be called on the thread that created the
// object.
-class BASE_EXPORT ServicesDirectory {
+class BASE_EXPORT ServiceDirectory {
public:
// Callback called to connect incoming requests.
using ConnectServiceCallback =
- base::RepeatingCallback<void(ScopedZxHandle channel)>;
+ base::RepeatingCallback<void(zx::channel channel)>;
// Creates services directory that will be served over the
// |directory_channel|.
- explicit ServicesDirectory(ScopedZxHandle directory_channel);
+ explicit ServiceDirectory(zx::channel directory_channel);
- ~ServicesDirectory();
+ ~ServiceDirectory();
// Returns default ServiceDirectory instance for the current process. It
// publishes services to the directory provided by the process creator.
- static ServicesDirectory* GetDefault();
+ static ServiceDirectory* GetDefault();
void AddService(StringPiece name, ConnectServiceCallback connect_callback);
void RemoveService(StringPiece name);
+ void RemoveAllServices();
private:
// Called by |svc_dir_| to handle service requests.
@@ -58,10 +60,10 @@ class BASE_EXPORT ServicesDirectory {
svc_dir_t* svc_dir_ = nullptr;
base::flat_map<std::string, ConnectServiceCallback> services_;
- DISALLOW_COPY_AND_ASSIGN(ServicesDirectory);
+ DISALLOW_COPY_AND_ASSIGN(ServiceDirectory);
};
} // namespace fuchsia
} // namespace base
-#endif // BASE_FUCHSIA_SERVICES_DIRECTORY_H_ \ No newline at end of file
+#endif // BASE_FUCHSIA_SERVICE_DIRECTORY_H_
diff --git a/chromium/base/fuchsia/service_directory_test_base.cc b/chromium/base/fuchsia/service_directory_test_base.cc
new file mode 100644
index 00000000000..c5a09f3b531
--- /dev/null
+++ b/chromium/base/fuchsia/service_directory_test_base.cc
@@ -0,0 +1,82 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/service_directory_test_base.h"
+
+#include <lib/fdio/util.h>
+
+namespace base {
+namespace fuchsia {
+
+TestInterfaceImpl::TestInterfaceImpl() = default;
+TestInterfaceImpl::~TestInterfaceImpl() = default;
+
+// TestInterface implementation.
+void TestInterfaceImpl::Add(int32_t a, int32_t b, AddCallback callback) {
+ callback(a + b);
+}
+
+ServiceDirectoryTestBase::ServiceDirectoryTestBase() {
+ zx::channel service_directory_channel;
+ EXPECT_EQ(zx::channel::create(0, &service_directory_channel,
+ &service_directory_client_channel_),
+ ZX_OK);
+
+ // Mount service dir and publish the service.
+ service_directory_ =
+ std::make_unique<ServiceDirectory>(std::move(service_directory_channel));
+ service_binding_ =
+ std::make_unique<ScopedServiceBinding<test_fidl::TestInterface>>(
+ service_directory_.get(), &test_service_);
+
+ ConnectClientContextToDirectory("public");
+}
+
+ServiceDirectoryTestBase::~ServiceDirectoryTestBase() = default;
+
+void ServiceDirectoryTestBase::ConnectClientContextToDirectory(
+ const char* path) {
+ // Open directory |path| from the service directory.
+ zx::channel public_directory_channel;
+ zx::channel public_directory_client_channel;
+ EXPECT_EQ(zx::channel::create(0, &public_directory_channel,
+ &public_directory_client_channel),
+ ZX_OK);
+ EXPECT_EQ(fdio_open_at(service_directory_client_channel_.get(), path, 0,
+ public_directory_channel.release()),
+ ZX_OK);
+
+ // Create ComponentContext and connect to the test service.
+ client_context_ = std::make_unique<ComponentContext>(
+ std::move(public_directory_client_channel));
+}
+
+void ServiceDirectoryTestBase::VerifyTestInterface(
+ fidl::InterfacePtr<test_fidl::TestInterface>* stub,
+ bool expect_error) {
+ // Call the service and wait for response.
+ base::RunLoop run_loop;
+ bool error = false;
+
+ stub->set_error_handler([&run_loop, &error]() {
+ error = true;
+ run_loop.Quit();
+ });
+
+ (*stub)->Add(2, 2, [&run_loop](int32_t result) {
+ EXPECT_EQ(result, 4);
+ run_loop.Quit();
+ });
+
+ run_loop.Run();
+
+ EXPECT_EQ(error, expect_error);
+
+  // Reset the error handler because the current one captures references to
+  // |run_loop| and |error|, which are about to be destroyed.
+ stub->set_error_handler([]() {});
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/service_directory_test_base.h b/chromium/base/fuchsia/service_directory_test_base.h
new file mode 100644
index 00000000000..88348a63a84
--- /dev/null
+++ b/chromium/base/fuchsia/service_directory_test_base.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_SERVICE_DIRECTORY_TEST_BASE_H_
+#define BASE_FUCHSIA_SERVICE_DIRECTORY_TEST_BASE_H_
+
+#include <lib/zx/channel.h>
+
+#include "base/fuchsia/component_context.h"
+#include "base/fuchsia/scoped_service_binding.h"
+#include "base/fuchsia/test_fidl/cpp/fidl.h"
+#include "base/message_loop/message_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace fuchsia {
+
+class TestInterfaceImpl : public test_fidl::TestInterface {
+ public:
+ TestInterfaceImpl();
+ ~TestInterfaceImpl() override;
+
+ // TestInterface implementation.
+ void Add(int32_t a, int32_t b, AddCallback callback) override;
+};
+
+class ServiceDirectoryTestBase : public testing::Test {
+ public:
+ ServiceDirectoryTestBase();
+ ~ServiceDirectoryTestBase() override;
+
+ void ConnectClientContextToDirectory(const char* path);
+ void VerifyTestInterface(fidl::InterfacePtr<test_fidl::TestInterface>* stub,
+ bool expect_error);
+
+ protected:
+ MessageLoopForIO message_loop_;
+ std::unique_ptr<ServiceDirectory> service_directory_;
+ zx::channel service_directory_client_channel_;
+ TestInterfaceImpl test_service_;
+ std::unique_ptr<ScopedServiceBinding<test_fidl::TestInterface>>
+ service_binding_;
+ std::unique_ptr<ComponentContext> client_context_;
+};
+
+} // namespace fuchsia
+} // namespace base
+
+#endif // BASE_FUCHSIA_SERVICE_DIRECTORY_TEST_BASE_H_ \ No newline at end of file
diff --git a/chromium/base/fuchsia/service_directory_unittest.cc b/chromium/base/fuchsia/service_directory_unittest.cc
new file mode 100644
index 00000000000..0af6f1fef1e
--- /dev/null
+++ b/chromium/base/fuchsia/service_directory_unittest.cc
@@ -0,0 +1,86 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/service_directory.h"
+
+#include <lib/fdio/util.h>
+#include <lib/zx/channel.h>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/fuchsia/service_directory_test_base.h"
+#include "base/location.h"
+#include "base/run_loop.h"
+#include "base/task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace fuchsia {
+
+class ServiceDirectoryTest : public ServiceDirectoryTestBase {};
+
+// Verifies that ComponentContext can consume a public service in
+// ServiceDirectory and that the connection is closed when the client stub is
+// destroyed.
+TEST_F(ServiceDirectoryTest, ConnectDisconnect) {
+ auto stub = client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, false);
+
+ base::RunLoop run_loop;
+ service_binding_->SetOnLastClientCallback(run_loop.QuitClosure());
+
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(
+ [](base::RunLoop* run_loop) {
+ ADD_FAILURE();
+ run_loop->Quit();
+ },
+ &run_loop),
+ TestTimeouts::action_timeout());
+
+ stub.Unbind();
+ run_loop.Run();
+}
+
+// Verifies that we can connect to the same service more than once.
+TEST_F(ServiceDirectoryTest, ConnectMulti) {
+ auto stub = client_context_->ConnectToService<test_fidl::TestInterface>();
+ auto stub2 = client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, false);
+ VerifyTestInterface(&stub2, false);
+}
+
+// Verify that services are also exported to the legacy flat service namespace.
+TEST_F(ServiceDirectoryTest, ConnectLegacy) {
+ ConnectClientContextToDirectory(".");
+ auto stub = client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, false);
+}
+
+// Verify that ComponentContext can handle the case when the service directory
+// connection is disconnected.
+TEST_F(ServiceDirectoryTest, DirectoryGone) {
+ service_binding_.reset();
+ service_directory_.reset();
+
+ fidl::InterfacePtr<test_fidl::TestInterface> stub;
+ zx_status_t status =
+ client_context_->ConnectToService(FidlInterfaceRequest(&stub));
+ EXPECT_EQ(status, ZX_ERR_PEER_CLOSED);
+
+ VerifyTestInterface(&stub, true);
+}
+
+// Verify that the case when the service doesn't exist is handled properly.
+TEST_F(ServiceDirectoryTest, NoService) {
+ service_binding_.reset();
+ auto stub = client_context_->ConnectToService<test_fidl::TestInterface>();
+ VerifyTestInterface(&stub, true);
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/services_directory_unittest.cc b/chromium/base/fuchsia/services_directory_unittest.cc
deleted file mode 100644
index c406f5ce650..00000000000
--- a/chromium/base/fuchsia/services_directory_unittest.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/fuchsia/services_directory.h"
-
-#include <fdio/util.h>
-
-#include "base/bind.h"
-#include "base/fuchsia/component_context.h"
-#include "base/fuchsia/scoped_service_binding.h"
-#include "base/fuchsia/test_fidl/cpp/fidl.h"
-#include "base/message_loop/message_loop.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace fuchsia {
-
-class TestInterfaceImpl : public test_fidl::TestInterface {
- public:
- void Add(int32_t a, int32_t b, AddCallback callback) override {
- callback(a + b);
- }
-};
-
-// Verifies that a service connected by ServicesDirectory can be imported from
-// another ServicesDirectory.
-TEST(ServicesDirectoryTest, Connect) {
- MessageLoopForIO message_loop_;
-
- ScopedZxHandle dir_service_handle;
- ScopedZxHandle dir_client_handle;
- ASSERT_EQ(zx_channel_create(0, dir_service_handle.receive(),
- dir_client_handle.receive()),
- ZX_OK);
-
- // Mount service dir and publish the service.
- ServicesDirectory service_dir(std::move(dir_service_handle));
- TestInterfaceImpl test_service;
- ScopedServiceBinding<test_fidl::TestInterface> service_binding(&service_dir,
- &test_service);
-
- // Open public directory from the service directory.
- ScopedZxHandle public_dir_service_handle;
- ScopedZxHandle public_dir_client_handle;
- ASSERT_EQ(zx_channel_create(0, public_dir_service_handle.receive(),
- public_dir_client_handle.receive()),
- ZX_OK);
- ASSERT_EQ(fdio_open_at(dir_client_handle.get(), "public", 0,
- public_dir_service_handle.release()),
- ZX_OK);
-
- // Create ComponentContext and connect to the test service.
- ComponentContext client_context(std::move(public_dir_client_handle));
- auto stub = client_context.ConnectToService<test_fidl::TestInterface>();
-
- // Call the service and wait for response.
- base::RunLoop run_loop;
- bool error = false;
-
- stub.set_error_handler([&run_loop, &error]() {
- error = true;
- run_loop.Quit();
- });
-
- stub->Add(2, 2, [&run_loop](int32_t result) {
- EXPECT_EQ(result, 4);
- run_loop.Quit();
- });
-
- run_loop.Run();
-
- EXPECT_FALSE(error);
-}
-
-} // namespace fuchsia
-} // namespace base
diff --git a/chromium/base/i18n/break_iterator_unittest.cc b/chromium/base/i18n/break_iterator_unittest.cc
index 6137e02bd77..ed5de448a40 100644
--- a/chromium/base/i18n/break_iterator_unittest.cc
+++ b/chromium/base/i18n/break_iterator_unittest.cc
@@ -8,8 +8,10 @@
#include "base/macros.h"
#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -66,7 +68,7 @@ TEST(BreakIteratorTest, BreakWord) {
EXPECT_FALSE(iter.IsWord());
}
-TEST(BreakIteratorTest, BreakWide16) {
+TEST(BreakIteratorTest, BreakWordWide16) {
// Two greek words separated by space.
const string16 str(WideToUTF16(
L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
@@ -90,7 +92,7 @@ TEST(BreakIteratorTest, BreakWide16) {
EXPECT_FALSE(iter.IsWord());
}
-TEST(BreakIteratorTest, BreakWide32) {
+TEST(BreakIteratorTest, BreakWordWide32) {
// U+1D49C MATHEMATICAL SCRIPT CAPITAL A
const char very_wide_char[] = "\xF0\x9D\x92\x9C";
const string16 str(
@@ -114,6 +116,127 @@ TEST(BreakIteratorTest, BreakWide32) {
EXPECT_FALSE(iter.IsWord());
}
+TEST(BreakIteratorTest, BreakWordThai) {
+ // Terms in Thai, without spaces in between.
+ const char term1[] = "พิมพ์";
+ const char term2[] = "น้อย";
+ const char term3[] = "ลง";
+ const string16 str(UTF8ToUTF16(base::JoinString({term1, term2, term3}, "")));
+
+ BreakIterator iter(str, BreakIterator::BREAK_WORD);
+ ASSERT_TRUE(iter.Init());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term1), iter.GetString());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term2), iter.GetString());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term3), iter.GetString());
+ EXPECT_FALSE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+}
+
+// In some languages, words are not separated by spaces. ICU provides a huge
+// dictionary to detect word boundaries in Thai, Chinese, Japanese, Burmese,
+// and Khmer. Due to the size of this dictionary, the part for Chinese and
+// Japanese is not shipped on mobile.
+#if !(defined(OS_IOS) || defined(OS_ANDROID))
+
+TEST(BreakIteratorTest, BreakWordChinese) {
+ // Terms in Traditional Chinese, without spaces in between.
+ const char term1[] = "瀏覽";
+ const char term2[] = "速度";
+ const char term3[] = "飛快";
+ const string16 str(UTF8ToUTF16(base::JoinString({term1, term2, term3}, "")));
+
+ BreakIterator iter(str, BreakIterator::BREAK_WORD);
+ ASSERT_TRUE(iter.Init());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term1), iter.GetString());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term2), iter.GetString());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term3), iter.GetString());
+ EXPECT_FALSE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakWordJapanese) {
+ // Terms in Japanese, without spaces in between.
+ const char term1[] = "モバイル";
+ const char term2[] = "でも";
+ const string16 str(UTF8ToUTF16(base::JoinString({term1, term2}, "")));
+
+ BreakIterator iter(str, BreakIterator::BREAK_WORD);
+ ASSERT_TRUE(iter.Init());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term1), iter.GetString());
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(term2), iter.GetString());
+ EXPECT_FALSE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakWordChineseEnglish) {
+ // Terms in Simplified Chinese mixed with English and wide punctuations.
+ string16 space(UTF8ToUTF16(" "));
+ const char token1[] = "下载";
+ const char token2[] = "Chrome";
+ const char token3[] = "(";
+ const char token4[] = "Mac";
+ const char token5[] = "版";
+ const char token6[] = ")";
+ const string16 str(UTF8ToUTF16(base::JoinString(
+ {token1, " ", token2, token3, token4, " ", token5, token6}, "")));
+
+ BreakIterator iter(str, BreakIterator::BREAK_WORD);
+ ASSERT_TRUE(iter.Init());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token1), iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+ EXPECT_EQ(space, iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token2), iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token3), iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token4), iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+ EXPECT_EQ(space, iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_TRUE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token5), iter.GetString());
+
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+ EXPECT_EQ(UTF8ToUTF16(token6), iter.GetString());
+
+ EXPECT_FALSE(iter.Advance());
+ EXPECT_FALSE(iter.IsWord());
+}
+
+#endif // !(defined(OS_IOS) || defined(OS_ANDROID))
+
TEST(BreakIteratorTest, BreakSpaceEmpty) {
string16 empty;
BreakIterator iter(empty, BreakIterator::BREAK_SPACE);
diff --git a/chromium/base/i18n/icu_util.cc b/chromium/base/i18n/icu_util.cc
index bc08ecb1e39..4d588c6160d 100644
--- a/chromium/base/i18n/icu_util.cc
+++ b/chromium/base/i18n/icu_util.cc
@@ -63,6 +63,14 @@ bool g_called_once = false;
#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+// To debug http://crbug.com/445616.
+int g_debug_icu_last_error;
+int g_debug_icu_load;
+int g_debug_icu_pf_error_details;
+int g_debug_icu_pf_last_error;
+#if defined(OS_WIN)
+wchar_t g_debug_icu_pf_filename[_MAX_PATH];
+#endif // OS_WIN
// Use an unversioned file name to simplify an ICU version update down the road.
// No need to change the filename in multiple places (gyp files, windows
// build pkg configurations, etc). 'l' stands for Little Endian.
@@ -98,8 +106,22 @@ void LazyInitIcuDataFile() {
LOG(ERROR) << "Can't find " << kIcuDataFileName;
return;
}
+#if defined(OS_WIN)
+ // TODO(brucedawson): http://crbug.com/445616
+ wchar_t tmp_buffer[_MAX_PATH] = {0};
+ wcscpy_s(tmp_buffer, data_path.value().c_str());
+ debug::Alias(tmp_buffer);
+#endif
data_path = data_path.AppendASCII(kIcuDataFileName);
-#else
+
+#if defined(OS_WIN)
+ // TODO(brucedawson): http://crbug.com/445616
+ wchar_t tmp_buffer2[_MAX_PATH] = {0};
+ wcscpy_s(tmp_buffer2, data_path.value().c_str());
+ debug::Alias(tmp_buffer2);
+#endif
+
+#else // !defined(OS_MACOSX)
// Assume it is in the framework bundle's Resources directory.
ScopedCFTypeRef<CFStringRef> data_file_name(
SysUTF8ToCFStringRef(kIcuDataFileName));
@@ -117,9 +139,24 @@ void LazyInitIcuDataFile() {
#endif // !defined(OS_MACOSX)
File file(data_path, File::FLAG_OPEN | File::FLAG_READ);
if (file.IsValid()) {
+ // TODO(brucedawson): http://crbug.com/445616.
+ g_debug_icu_pf_last_error = 0;
+ g_debug_icu_pf_error_details = 0;
+#if defined(OS_WIN)
+ g_debug_icu_pf_filename[0] = 0;
+#endif // OS_WIN
+
g_icudtl_pf = file.TakePlatformFile();
g_icudtl_region = MemoryMappedFile::Region::kWholeFile;
}
+#if defined(OS_WIN)
+ else {
+ // TODO(brucedawson): http://crbug.com/445616.
+ g_debug_icu_pf_last_error = ::GetLastError();
+ g_debug_icu_pf_error_details = file.error_details();
+ wcscpy_s(g_debug_icu_pf_filename, data_path.value().c_str());
+ }
+#endif // OS_WIN
}
bool InitializeICUWithFileDescriptorInternal(
@@ -127,15 +164,18 @@ bool InitializeICUWithFileDescriptorInternal(
const MemoryMappedFile::Region& data_region) {
// This can be called multiple times in tests.
if (g_icudtl_mapped_file) {
+ g_debug_icu_load = 0; // To debug http://crbug.com/445616.
return true;
}
if (data_fd == kInvalidPlatformFile) {
+ g_debug_icu_load = 1; // To debug http://crbug.com/445616.
LOG(ERROR) << "Invalid file descriptor to ICU data received.";
return false;
}
std::unique_ptr<MemoryMappedFile> icudtl_mapped_file(new MemoryMappedFile());
if (!icudtl_mapped_file->Initialize(File(data_fd), data_region)) {
+ g_debug_icu_load = 2; // To debug http://crbug.com/445616.
LOG(ERROR) << "Couldn't mmap icu data file";
return false;
}
@@ -143,8 +183,12 @@ bool InitializeICUWithFileDescriptorInternal(
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(const_cast<uint8_t*>(g_icudtl_mapped_file->data()), &err);
+ if (err != U_ZERO_ERROR) {
+ g_debug_icu_load = 3; // To debug http://crbug.com/445616.
+ g_debug_icu_last_error = err;
+ }
#if defined(OS_ANDROID)
- if (err == U_ZERO_ERROR) {
+ else {
// On Android, we can't leave it up to ICU to set the default timezone
// because ICU's timezone detection does not work in many timezones (e.g.
// Australia/Sydney, Asia/Seoul, Europe/Paris ). Use JNI to detect the host
@@ -250,6 +294,20 @@ bool InitializeICU() {
LazyInitIcuDataFile();
result =
InitializeICUWithFileDescriptorInternal(g_icudtl_pf, g_icudtl_region);
+#if defined(OS_WIN)
+ int debug_icu_load = g_debug_icu_load;
+ debug::Alias(&debug_icu_load);
+ int debug_icu_last_error = g_debug_icu_last_error;
+ debug::Alias(&debug_icu_last_error);
+ int debug_icu_pf_last_error = g_debug_icu_pf_last_error;
+ debug::Alias(&debug_icu_pf_last_error);
+ int debug_icu_pf_error_details = g_debug_icu_pf_error_details;
+ debug::Alias(&debug_icu_pf_error_details);
+ wchar_t debug_icu_pf_filename[_MAX_PATH] = {0};
+ wcscpy_s(debug_icu_pf_filename, g_debug_icu_pf_filename);
+ debug::Alias(&debug_icu_pf_filename);
+ CHECK(result); // TODO(brucedawson): http://crbug.com/445616
+#endif
#endif
// To respond to the timezone change properly, the default timezone
diff --git a/chromium/base/i18n/number_formatting_unittest.cc b/chromium/base/i18n/number_formatting_unittest.cc
index 045bc0e18ce..d2eb5687963 100644
--- a/chromium/base/i18n/number_formatting_unittest.cc
+++ b/chromium/base/i18n/number_formatting_unittest.cc
@@ -98,20 +98,24 @@ TEST(NumberFormattingTest, FormatPercent) {
static const struct {
int64_t number;
const char* expected_english;
- const wchar_t* expected_german; // Note: Space before % isn't \x20.
+ const char* expected_german; // Note: Space before % isn't \x20.
// Note: Eastern Arabic-Indic digits (U+06Fx) for Persian and
- // Arabic-Indic digits (U+066x) for Arabic.
+  // Arabic-Indic digits (U+066x) for Arabic in Egypt (ar-EG). Arabic (ar)
+  // uses European digits (Google-patch).
// See https://unicode.org/cldr/trac/ticket/9040 for details.
// See also https://unicode.org/cldr/trac/ticket/10176 .
// For now, take what CLDR 32 has (percent sign to the right of
// a number in Persian).
- const wchar_t* expected_persian;
- const wchar_t* expected_arabic;
+ const char* expected_persian;
+ const char* expected_arabic;
+ const char* expected_arabic_egypt;
} cases[] = {
- {0, "0%", L"0\xa0%", L"\x6f0\x66a", L"\x660\x66a\x61c"},
- {42, "42%", L"42\xa0%", L"\x6f4\x6f2\x66a", L"\x664\x662\x66a\x61c"},
- {1024, "1,024%", L"1.024\xa0%", L"\x6f1\x66c\x6f0\x6f2\x6f4\x66a",
- L"\x661\x66c\x660\x662\x664\x66a\x61c"},
+ {0, "0%", u8"0\u00a0%", u8"\u06f0\u066a", u8"0\u200e%\u200e",
+ u8"\u0660\u066a\u061c"},
+ {42, "42%", "42\u00a0%", u8"\u06f4\u06f2\u066a", u8"42\u200e%\u200e",
+ "\u0664\u0662\u066a\u061c"},
+ {1024, "1,024%", "1.024\u00a0%", u8"\u06f1\u066c\u06f0\u06f2\u06f4\u066a",
+ "1,024\u200e%\u200e", "\u0661\u066c\u0660\u0662\u0664\u066a\u061c"},
};
test::ScopedRestoreICUDefaultLocale restore_locale;
@@ -120,13 +124,16 @@ TEST(NumberFormattingTest, FormatPercent) {
EXPECT_EQ(ASCIIToUTF16(cases[i].expected_english),
FormatPercent(cases[i].number));
i18n::SetICUDefaultLocale("de");
- EXPECT_EQ(WideToUTF16(cases[i].expected_german),
+ EXPECT_EQ(UTF8ToUTF16(cases[i].expected_german),
FormatPercent(cases[i].number));
i18n::SetICUDefaultLocale("fa");
- EXPECT_EQ(WideToUTF16(cases[i].expected_persian),
+ EXPECT_EQ(UTF8ToUTF16(cases[i].expected_persian),
FormatPercent(cases[i].number));
i18n::SetICUDefaultLocale("ar");
- EXPECT_EQ(WideToUTF16(cases[i].expected_arabic),
+ EXPECT_EQ(UTF8ToUTF16(cases[i].expected_arabic),
+ FormatPercent(cases[i].number));
+ i18n::SetICUDefaultLocale("ar-EG");
+ EXPECT_EQ(UTF8ToUTF16(cases[i].expected_arabic_egypt),
FormatPercent(cases[i].number));
}
}
diff --git a/chromium/base/i18n/rtl.cc b/chromium/base/i18n/rtl.cc
index bba0d449cb6..5a8db8ae1c8 100644
--- a/chromium/base/i18n/rtl.cc
+++ b/chromium/base/i18n/rtl.cc
@@ -154,6 +154,11 @@ bool IsRTL() {
return ICUIsRTL();
}
+void SetRTLForTesting(bool rtl) {
+ SetICUDefaultLocale(rtl ? "he" : "en");
+ DCHECK_EQ(rtl, IsRTL());
+}
+
bool ICUIsRTL() {
if (g_icu_text_direction == UNKNOWN_DIRECTION) {
const icu::Locale& locale = icu::Locale::getDefault();
diff --git a/chromium/base/i18n/rtl.h b/chromium/base/i18n/rtl.h
index 53259709057..e54f8ea3556 100644
--- a/chromium/base/i18n/rtl.h
+++ b/chromium/base/i18n/rtl.h
@@ -54,6 +54,9 @@ BASE_I18N_EXPORT void SetICUDefaultLocale(const std::string& locale_string);
// Returns true if the application text direction is right-to-left.
BASE_I18N_EXPORT bool IsRTL();
+// A test utility function to set the application default text direction.
+BASE_I18N_EXPORT void SetRTLForTesting(bool rtl);
+
// Returns whether the text direction for the default ICU locale is RTL. This
// assumes that SetICUDefaultLocale has been called to set the default locale to
// the UI locale of Chrome.
diff --git a/chromium/base/i18n/rtl_unittest.cc b/chromium/base/i18n/rtl_unittest.cc
index 313d2b440de..fbdd1a10ab4 100644
--- a/chromium/base/i18n/rtl_unittest.cc
+++ b/chromium/base/i18n/rtl_unittest.cc
@@ -23,17 +23,6 @@
namespace base {
namespace i18n {
-namespace {
-
-// A test utility function to set the application default text direction.
-void SetRTL(bool rtl) {
- // Override the current locale/direction.
- SetICUDefaultLocale(rtl ? "he" : "en");
- EXPECT_EQ(rtl, IsRTL());
-}
-
-} // namespace
-
class RTLTest : public PlatformTest {
};
@@ -314,7 +303,7 @@ TEST_F(RTLTest, WrapString) {
test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
- SetRTL(!IsRTL());
+ SetRTLForTesting(!IsRTL());
string16 empty;
WrapStringWithLTRFormatting(&empty);
@@ -362,7 +351,7 @@ TEST_F(RTLTest, GetDisplayStringInLTRDirectionality) {
test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
- SetRTL(!IsRTL());
+ SetRTLForTesting(!IsRTL());
for (size_t i = 0; i < arraysize(cases); ++i) {
string16 input = WideToUTF16(cases[i].path);
string16 output = GetDisplayStringInLTRDirectionality(input);
@@ -449,7 +438,7 @@ TEST_F(RTLTest, UnadjustStringForLocaleDirection) {
test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
- SetRTL(!IsRTL());
+ SetRTLForTesting(!IsRTL());
for (size_t i = 0; i < arraysize(cases); ++i) {
string16 test_case = WideToUTF16(cases[i]);
@@ -500,7 +489,7 @@ TEST_F(RTLTest, EnsureTerminatedDirectionalFormatting) {
test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
- SetRTL(!IsRTL());
+ SetRTLForTesting(!IsRTL());
for (size_t i = 0; i < arraysize(cases); ++i) {
string16 unsanitized_text = WideToUTF16(cases[i].unformated_text);
string16 sanitized_text = WideToUTF16(cases[i].formatted_text);
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 027b7c949da..29b25972d29 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -187,8 +187,7 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
}
TEST(TimeFormattingTest, TimeFormatTimeOfDayDE) {
- // Test for a locale that uses different mark than "AM" and "PM".
- // As an instance, we use third_party/icu/source/data/locales/de.txt.
+  // German uses the 24h clock by default, but uses 'AM'/'PM' for the 12h format.
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("de");
ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
@@ -196,7 +195,7 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayDE) {
Time time;
EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
- string16 clock12h_pm(UTF8ToUTF16("3:42 nachm."));
+ string16 clock12h_pm(UTF8ToUTF16("3:42 PM"));
string16 clock12h(ASCIIToUTF16("3:42"));
// The default is 24h clock.
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index 2464b1cc15b..91b045afb33 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -19,6 +19,9 @@ BASE_EXPORT bool IsRunningOnIOS10OrLater();
// Returns whether the operating system is iOS 11 or later.
BASE_EXPORT bool IsRunningOnIOS11OrLater();
+// Returns whether the operating system is iOS 12 or later.
+BASE_EXPORT bool IsRunningOnIOS12OrLater();
+
// Returns whether the operating system is at the given version or later.
BASE_EXPORT bool IsRunningOnOrLater(int32_t major,
int32_t minor,
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index 2402d30ab06..eba1b7107b7 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -38,6 +38,11 @@ bool IsRunningOnIOS11OrLater() {
return is_running_on_or_later;
}
+bool IsRunningOnIOS12OrLater() {
+ static const bool is_running_on_or_later = IsRunningOnOrLater(12, 0, 0);
+ return is_running_on_or_later;
+}
+
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
static const int32_t* current_version = OSVersionAsArray();
int32_t version[] = {major, minor, bug_fix};
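
The new IsRunningOnIOS12OrLater() helper mirrors the existing version checks above. A minimal caller sketch, assuming the existing base::ios namespace; the feature hook is hypothetical:

#include "base/ios/ios_util.h"

void EnableIOS12OnlyFeature();  // Hypothetical feature hook, not part of the patch.

void MaybeEnableIOS12Behavior() {
  // Gate an iOS 12-only code path on the new runtime check.
  if (base::ios::IsRunningOnIOS12OrLater())
    EnableIOS12OnlyFeature();
}
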
diff --git a/chromium/base/json/json_reader_fuzzer.cc b/chromium/base/json/json_reader_fuzzer.cc
index a8490da179f..5e69940e67e 100644
--- a/chromium/base/json/json_reader_fuzzer.cc
+++ b/chromium/base/json/json_reader_fuzzer.cc
@@ -5,9 +5,6 @@
#include "base/json/json_reader.h"
#include "base/values.h"
-int error_code, error_line, error_column;
-std::string error_message;
-
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
if (size < 2)
@@ -21,6 +18,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
base::StringPiece input_string(input.get(), size - 1);
const int options = data[size - 1];
+
+ int error_code, error_line, error_column;
+ std::string error_message;
base::JSONReader::ReadAndReturnError(input_string, options, &error_code,
&error_message, &error_line,
&error_column);
diff --git a/chromium/base/json/string_escape_fuzzer.cc b/chromium/base/json/string_escape_fuzzer.cc
index e44bd4fe16b..f4304118c65 100644
--- a/chromium/base/json/string_escape_fuzzer.cc
+++ b/chromium/base/json/string_escape_fuzzer.cc
@@ -6,8 +6,6 @@
#include <memory>
-std::string escaped_string;
-
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
if (size < 2)
@@ -22,6 +20,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
memcpy(input.get(), data, actual_size_char8);
base::StringPiece input_string(input.get(), actual_size_char8);
+ std::string escaped_string;
base::EscapeJSONString(input_string, put_in_quotes, &escaped_string);
// Test for wide-strings if available size is even.
@@ -31,6 +30,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
size_t actual_size_char16 = actual_size_char8 / 2;
base::StringPiece16 input_string16(
reinterpret_cast<base::char16*>(input.get()), actual_size_char16);
+ escaped_string.clear();
base::EscapeJSONString(input_string16, put_in_quotes, &escaped_string);
return 0;
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index 29960599a5c..08c1f0fc596 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -903,9 +903,8 @@ const LogSeverity LOG_DCHECK = LOG_FATAL;
#define DCHECK_OP(name, op, val1, val2) \
switch (0) case 0: default: \
if (::logging::CheckOpResult true_if_passed = \
- DCHECK_IS_ON() ? \
::logging::Check##name##Impl((val1), (val2), \
- #val1 " " #op " " #val2) : nullptr) \
+ #val1 " " #op " " #val2)) \
; \
else \
::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 9025aaf4e55..9b79f9875da 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -30,6 +30,15 @@
#endif // OS_WIN
#if defined(OS_FUCHSIA)
+#include <lib/zx/event.h>
+#include <lib/zx/port.h>
+#include <lib/zx/process.h>
+#include <lib/zx/thread.h>
+#include <lib/zx/time.h>
+#include <zircon/process.h>
+#include <zircon/syscalls/debug.h>
+#include <zircon/syscalls/port.h>
+#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif
@@ -279,9 +288,128 @@ TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
EXPECT_NE(addr1, addr3);
EXPECT_NE(addr2, addr3);
}
+#elif defined(OS_FUCHSIA)
+// CHECK causes a direct crash (without jumping to another function) only in
+// official builds. Unfortunately, continuous test coverage on official builds
+// is lower. Furthermore, since this test's Fuchsia implementation runs the
+// crashing code on a separate thread, it cannot rely on a CHECK that calls
+// abort(): abort() takes down the whole process before the thread exception
+// handler can handle the exception. DO_CHECK here falls back on IMMEDIATE_CRASH() in
+// non-official builds, to catch regressions earlier in the CQ.
+#if defined(OFFICIAL_BUILD)
+#define DO_CHECK CHECK
+#else
+#define DO_CHECK(cond) \
+ if (!(cond)) { \
+ IMMEDIATE_CRASH(); \
+ }
+#endif
+
+static const unsigned int kExceptionPortKey = 1u;
+static const unsigned int kThreadEndedPortKey = 2u;
+
+struct thread_data_t {
+ // For signaling the thread ended properly.
+ zx::unowned_event event;
+ // For registering thread termination.
+ zx::unowned_port port;
+ // Location where the thread is expected to crash.
+ int death_location;
+};
+
+void* CrashThread(void* arg) {
+ zx_status_t status;
+
+ thread_data_t* data = (thread_data_t*)arg;
+ int death_location = data->death_location;
+
+ // Register the exception handler on the port.
+ status = zx::thread::self()->bind_exception_port(*data->port,
+ kExceptionPortKey, 0);
+ if (status != ZX_OK) {
+ data->event->signal(0, ZX_USER_SIGNAL_0);
+ return nullptr;
+ }
+
+ DO_CHECK(death_location != 1);
+ DO_CHECK(death_location != 2);
+ DO_CHECK(death_location != 3);
+
+ // We should never reach this point; if we do, signal that the thread ended
+ // without crashing.
+ data->event->signal(0, ZX_USER_SIGNAL_0);
+ return nullptr;
+}
+
+// Runs the CrashThread function in a separate thread.
+void SpawnCrashThread(int death_location, uintptr_t* child_crash_addr) {
+ zx::port port;
+ zx::event event;
+ zx_status_t status;
+
+ status = zx::port::create(0, &port);
+ ASSERT_EQ(status, ZX_OK);
+ status = zx::event::create(0, &event);
+ ASSERT_EQ(status, ZX_OK);
+
+ // Register the thread ended event on the port.
+ status = event.wait_async(port, kThreadEndedPortKey, ZX_USER_SIGNAL_0,
+ ZX_WAIT_ASYNC_ONCE);
+ ASSERT_EQ(status, ZX_OK);
+
+ // Run the thread.
+ thread_data_t thread_data = {zx::unowned_event(event), zx::unowned_port(port),
+ death_location};
+ pthread_t thread;
+ int ret = pthread_create(&thread, nullptr, CrashThread, &thread_data);
+ ASSERT_EQ(ret, 0);
+
+ // Wait on the port.
+ zx_port_packet_t packet;
+ status = port.wait(zx::time::infinite(), &packet);
+ ASSERT_EQ(status, ZX_OK);
+ // Check the thread did crash and not terminate.
+ ASSERT_EQ(packet.key, kExceptionPortKey);
+
+ // Get the crash address.
+ zx::thread zircon_thread;
+ status = zx::process::self()->get_child(packet.exception.tid,
+ ZX_RIGHT_SAME_RIGHTS, &zircon_thread);
+ ASSERT_EQ(status, ZX_OK);
+ zx_thread_state_general_regs_t buffer;
+ status = zircon_thread.read_state(ZX_THREAD_STATE_GENERAL_REGS, &buffer,
+ sizeof(buffer));
+ ASSERT_EQ(status, ZX_OK);
+#if defined(ARCH_CPU_X86_64)
+ *child_crash_addr = static_cast<uintptr_t>(buffer.rip);
+#elif defined(ARCH_CPU_ARM64)
+ *child_crash_addr = static_cast<uintptr_t>(buffer.pc);
+#else
+#error Unsupported architecture
+#endif
+
+ status = zircon_thread.kill();
+ ASSERT_EQ(status, ZX_OK);
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+ uintptr_t child_crash_addr_1 = 0;
+ uintptr_t child_crash_addr_2 = 0;
+ uintptr_t child_crash_addr_3 = 0;
+
+ SpawnCrashThread(1, &child_crash_addr_1);
+ SpawnCrashThread(2, &child_crash_addr_2);
+ SpawnCrashThread(3, &child_crash_addr_3);
+
+ ASSERT_NE(0u, child_crash_addr_1);
+ ASSERT_NE(0u, child_crash_addr_2);
+ ASSERT_NE(0u, child_crash_addr_3);
+ ASSERT_NE(child_crash_addr_1, child_crash_addr_2);
+ ASSERT_NE(child_crash_addr_1, child_crash_addr_3);
+ ASSERT_NE(child_crash_addr_2, child_crash_addr_3);
+}
#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS) && \
- !defined(OS_FUCHSIA) && \
(defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY))
int g_child_crash_pipe;
diff --git a/chromium/base/mac/bind_objc_block.h b/chromium/base/mac/bind_objc_block.h
deleted file mode 100644
index 9a481ed987d..00000000000
--- a/chromium/base/mac/bind_objc_block.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_BIND_OBJC_BLOCK_H_
-#define BASE_MAC_BIND_OBJC_BLOCK_H_
-
-#include <Block.h>
-
-#include "base/bind.h"
-#include "base/callback_forward.h"
-#include "base/compiler_specific.h"
-#include "base/mac/scoped_block.h"
-
-// BindBlock builds a callback from an Objective-C block. Example usages:
-//
-// Closure closure = BindBlock(^{DoSomething();});
-//
-// Callback<int(void)> callback = BindBlock(^{return 42;});
-//
-// Callback<void(const std::string&, const std::string&)> callback =
-// BindBlock(^(const std::string& arg0, const std::string& arg1) {
-// ...
-// });
-//
-// These variadic templates will accommodate any number of arguments, however
-// the underlying templates in bind_internal.h and callback.h are limited to
-// seven total arguments, and the bound block itself is used as one of these
-// arguments, so functionally the templates are limited to binding blocks with
-// zero through six arguments.
-//
-// For code compiled with ARC (automatic reference counting), use BindBlockArc.
-// This is because the method has a different implementation (to avoid over-
-// retaining the block) and need to have a different name not to break the ODR
-// (one definition rule). Another subtle difference is that the implementation
-// will call a different version of ScopedBlock constructor thus the linker must
-// not merge both functions.
-
-namespace base {
-
-namespace internal {
-
-// Helper function to run the block contained in the parameter.
-template<typename R, typename... Args>
-R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
- R(^extracted_block)(Args...) = block.get();
- return extracted_block(args...);
-}
-
-} // namespace internal
-
-#if !defined(__has_feature) || !__has_feature(objc_arc)
-
-// Construct a callback from an objective-C block with up to six arguments (see
-// note above).
-template<typename R, typename... Args>
-base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
- return base::Bind(
- &base::internal::RunBlock<R, Args...>,
- base::mac::ScopedBlock<R (^)(Args...)>(
- base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
- block)));
-}
-
-#else
-
-// Construct a callback from an objective-C block with up to six arguments (see
-// note above).
-template <typename R, typename... Args>
-base::Callback<R(Args...)> BindBlockArc(R (^block)(Args...)) {
- return base::Bind(&base::internal::RunBlock<R, Args...>,
- base::mac::ScopedBlock<R (^)(Args...)>(block));
-}
-
-#endif
-
-} // namespace base
-
-#endif // BASE_MAC_BIND_OBJC_BLOCK_H_
diff --git a/chromium/base/mac/bind_objc_block_unittest.mm b/chromium/base/mac/bind_objc_block_unittest.mm
index 2b186725edb..b34514670d0 100644
--- a/chromium/base/mac/bind_objc_block_unittest.mm
+++ b/chromium/base/mac/bind_objc_block_unittest.mm
@@ -2,20 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#import "base/mac/bind_objc_block.h"
-
#include <string>
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/gtest_mac.h"
#if defined(OS_IOS)
#include "base/ios/weak_nsobject.h"
-#include "base/mac/scoped_nsautorelease_pool.h"
#endif
namespace {
@@ -24,9 +22,9 @@ TEST(BindObjcBlockTest, TestScopedClosureRunnerExitScope) {
int run_count = 0;
int* ptr = &run_count;
{
- base::ScopedClosureRunner runner(base::BindBlock(^{
- (*ptr)++;
- }));
+ base::ScopedClosureRunner runner(base::BindOnce(base::RetainBlock(^{
+ (*ptr)++;
+ })));
EXPECT_EQ(0, run_count);
}
EXPECT_EQ(1, run_count);
@@ -37,9 +35,9 @@ TEST(BindObjcBlockTest, TestScopedClosureRunnerRelease) {
int* ptr = &run_count;
base::OnceClosure c;
{
- base::ScopedClosureRunner runner(base::BindBlock(^{
- (*ptr)++;
- }));
+ base::ScopedClosureRunner runner(base::BindOnce(base::RetainBlock(^{
+ (*ptr)++;
+ })));
c = runner.Release();
EXPECT_EQ(0, run_count);
}
@@ -50,39 +48,42 @@ TEST(BindObjcBlockTest, TestScopedClosureRunnerRelease) {
TEST(BindObjcBlockTest, TestReturnValue) {
const int kReturnValue = 42;
- base::Callback<int(void)> c = base::BindBlock(^{return kReturnValue;});
- EXPECT_EQ(kReturnValue, c.Run());
+ base::OnceCallback<int(void)> c = base::BindOnce(base::RetainBlock(^{
+ return kReturnValue;
+ }));
+ EXPECT_EQ(kReturnValue, std::move(c).Run());
}
TEST(BindObjcBlockTest, TestArgument) {
const int kArgument = 42;
- base::Callback<int(int)> c = base::BindBlock(^(int a){return a + 1;});
- EXPECT_EQ(kArgument + 1, c.Run(kArgument));
+ base::OnceCallback<int(int)> c = base::BindOnce(base::RetainBlock(^(int a) {
+ return a + 1;
+ }));
+ EXPECT_EQ(kArgument + 1, std::move(c).Run(kArgument));
}
TEST(BindObjcBlockTest, TestTwoArguments) {
std::string result;
std::string* ptr = &result;
- base::Callback<void(const std::string&, const std::string&)> c =
- base::BindBlock(^(const std::string& a, const std::string& b) {
- *ptr = a + b;
- });
- c.Run("forty", "two");
+ base::OnceCallback<void(const std::string&, const std::string&)> c =
+ base::BindOnce(
+ base::RetainBlock(^(const std::string& a, const std::string& b) {
+ *ptr = a + b;
+ }));
+ std::move(c).Run("forty", "two");
EXPECT_EQ(result, "fortytwo");
}
TEST(BindObjcBlockTest, TestThreeArguments) {
std::string result;
std::string* ptr = &result;
- base::Callback<void(const std::string&,
- const std::string&,
- const std::string&)> c =
- base::BindBlock(^(const std::string& a,
- const std::string& b,
- const std::string& c) {
- *ptr = a + b + c;
- });
- c.Run("six", "times", "nine");
+ base::OnceCallback<void(const std::string&, const std::string&,
+ const std::string&)>
+ c = base::BindOnce(base::RetainBlock(
+ ^(const std::string& a, const std::string& b, const std::string& c) {
+ *ptr = a + b + c;
+ }));
+ std::move(c).Run("six", "times", "nine");
EXPECT_EQ(result, "sixtimesnine");
}
@@ -91,18 +92,49 @@ TEST(BindObjcBlockTest, TestSixArguments) {
std::string* ptr = &result1;
int result2;
int* ptr2 = &result2;
- base::Callback<void(int, int, const std::string&, const std::string&,
- int, const std::string&)> c =
- base::BindBlock(^(int a, int b, const std::string& c,
- const std::string& d, int e, const std::string& f) {
- *ptr = c + d + f;
- *ptr2 = a + b + e;
- });
- c.Run(1, 2, "infinite", "improbability", 3, "drive");
+ base::OnceCallback<void(int, int, const std::string&, const std::string&, int,
+ const std::string&)>
+ c = base::BindOnce(base::RetainBlock(^(int a, int b, const std::string& c,
+ const std::string& d, int e,
+ const std::string& f) {
+ *ptr = c + d + f;
+ *ptr2 = a + b + e;
+ }));
+ std::move(c).Run(1, 2, "infinite", "improbability", 3, "drive");
EXPECT_EQ(result1, "infiniteimprobabilitydrive");
EXPECT_EQ(result2, 6);
}
+TEST(BindObjcBlockTest, TestBlockMoveable) {
+ base::OnceClosure c;
+ __block BOOL invoked_block = NO;
+ {
+ base::mac::ScopedNSAutoreleasePool autorelease_pool;
+ c = base::BindOnce(base::RetainBlock(^(std::unique_ptr<BOOL> v) {
+ invoked_block = *v;
+ }),
+ std::make_unique<BOOL>(YES));
+ };
+ std::move(c).Run();
+ EXPECT_TRUE(invoked_block);
+}
+
+// Tests that the bound block is retained until the end of its execution,
+// even if the callback itself is destroyed during the invocation. It was
+// found that some code depends on this behaviour (see crbug.com/845687).
+TEST(BindObjcBlockTest, TestBlockDeallocation) {
+ base::RepeatingClosure closure;
+ __block BOOL invoked_block = NO;
+ closure = base::BindRepeating(
+ base::RetainBlock(^(base::RepeatingClosure* this_closure) {
+ *this_closure = base::RepeatingClosure();
+ invoked_block = YES;
+ }),
+ &closure);
+ closure.Run();
+ EXPECT_TRUE(invoked_block);
+}
+
#if defined(OS_IOS)
TEST(BindObjcBlockTest, TestBlockReleased) {
@@ -112,9 +144,9 @@ TEST(BindObjcBlockTest, TestBlockReleased) {
NSObject* nsobject = [[[NSObject alloc] init] autorelease];
weak_nsobject.reset(nsobject);
- auto callback = base::BindBlock(^{
+ auto callback = base::BindOnce(base::RetainBlock(^{
[nsobject description];
- });
+ }));
}
EXPECT_NSEQ(nil, weak_nsobject);
}
diff --git a/chromium/base/mac/bind_objc_block_unittest_arc.mm b/chromium/base/mac/bind_objc_block_unittest_arc.mm
index 24ec9748faa..1fd25d7ce15 100644
--- a/chromium/base/mac/bind_objc_block_unittest_arc.mm
+++ b/chromium/base/mac/bind_objc_block_unittest_arc.mm
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#import "base/mac/bind_objc_block.h"
-
#include <string>
#include "base/bind.h"
@@ -23,7 +21,7 @@ TEST(BindObjcBlockTestARC, TestScopedClosureRunnerExitScope) {
int run_count = 0;
int* ptr = &run_count;
{
- base::ScopedClosureRunner runner(base::BindBlockArc(^{
+ base::ScopedClosureRunner runner(base::BindOnce(^{
(*ptr)++;
}));
EXPECT_EQ(0, run_count);
@@ -36,7 +34,7 @@ TEST(BindObjcBlockTestARC, TestScopedClosureRunnerRelease) {
int* ptr = &run_count;
base::OnceClosure c;
{
- base::ScopedClosureRunner runner(base::BindBlockArc(^{
+ base::ScopedClosureRunner runner(base::BindOnce(^{
(*ptr)++;
}));
c = runner.Release();
@@ -49,41 +47,41 @@ TEST(BindObjcBlockTestARC, TestScopedClosureRunnerRelease) {
TEST(BindObjcBlockTestARC, TestReturnValue) {
const int kReturnValue = 42;
- base::Callback<int(void)> c = base::BindBlockArc(^{
+ base::OnceCallback<int(void)> c = base::BindOnce(^{
return kReturnValue;
});
- EXPECT_EQ(kReturnValue, c.Run());
+ EXPECT_EQ(kReturnValue, std::move(c).Run());
}
TEST(BindObjcBlockTestARC, TestArgument) {
const int kArgument = 42;
- base::Callback<int(int)> c = base::BindBlockArc(^(int a) {
+ base::OnceCallback<int(int)> c = base::BindOnce(^(int a) {
return a + 1;
});
- EXPECT_EQ(kArgument + 1, c.Run(kArgument));
+ EXPECT_EQ(kArgument + 1, std::move(c).Run(kArgument));
}
TEST(BindObjcBlockTestARC, TestTwoArguments) {
std::string result;
std::string* ptr = &result;
- base::Callback<void(const std::string&, const std::string&)> c =
- base::BindBlockArc(^(const std::string& a, const std::string& b) {
+ base::OnceCallback<void(const std::string&, const std::string&)> c =
+ base::BindOnce(^(const std::string& a, const std::string& b) {
*ptr = a + b;
});
- c.Run("forty", "two");
+ std::move(c).Run("forty", "two");
EXPECT_EQ(result, "fortytwo");
}
TEST(BindObjcBlockTestARC, TestThreeArguments) {
std::string result;
std::string* ptr = &result;
- base::Callback<void(const std::string&, const std::string&,
- const std::string&)>
- c = base::BindBlockArc(
+ base::OnceCallback<void(const std::string&, const std::string&,
+ const std::string&)>
+ c = base::BindOnce(
^(const std::string& a, const std::string& b, const std::string& c) {
*ptr = a + b + c;
});
- c.Run("six", "times", "nine");
+ std::move(c).Run("six", "times", "nine");
EXPECT_EQ(result, "sixtimesnine");
}
@@ -92,19 +90,48 @@ TEST(BindObjcBlockTestARC, TestSixArguments) {
std::string* ptr = &result1;
int result2;
int* ptr2 = &result2;
- base::Callback<void(int, int, const std::string&, const std::string&, int,
- const std::string&)>
- c = base::BindBlockArc(^(int a, int b, const std::string& c,
- const std::string& d, int e,
- const std::string& f) {
+ base::OnceCallback<void(int, int, const std::string&, const std::string&, int,
+ const std::string&)>
+ c = base::BindOnce(^(int a, int b, const std::string& c,
+ const std::string& d, int e, const std::string& f) {
*ptr = c + d + f;
*ptr2 = a + b + e;
});
- c.Run(1, 2, "infinite", "improbability", 3, "drive");
+ std::move(c).Run(1, 2, "infinite", "improbability", 3, "drive");
EXPECT_EQ(result1, "infiniteimprobabilitydrive");
EXPECT_EQ(result2, 6);
}
+TEST(BindObjcBlockTestARC, TestBlockMoveable) {
+ base::OnceClosure c;
+ __block BOOL invoked_block = NO;
+ @autoreleasepool {
+ c = base::BindOnce(
+ ^(std::unique_ptr<BOOL> v) {
+ invoked_block = *v;
+ },
+ std::make_unique<BOOL>(YES));
+ };
+ std::move(c).Run();
+ EXPECT_TRUE(invoked_block);
+}
+
+// Tests that the bound block is retained until the end of its execution,
+// even if the callback itself is destroyed during the invocation. It was
+// found that some code depends on this behaviour (see crbug.com/845687).
+TEST(BindObjcBlockTestARC, TestBlockDeallocation) {
+ base::RepeatingClosure closure;
+ __block BOOL invoked_block = NO;
+ closure = base::BindRepeating(
+ ^(base::RepeatingClosure* this_closure) {
+ *this_closure = base::RepeatingClosure();
+ invoked_block = YES;
+ },
+ &closure);
+ closure.Run();
+ EXPECT_TRUE(invoked_block);
+}
+
#if defined(OS_IOS)
TEST(BindObjcBlockTestARC, TestBlockReleased) {
@@ -113,7 +140,7 @@ TEST(BindObjcBlockTestARC, TestBlockReleased) {
NSObject* nsobject = [[NSObject alloc] init];
weak_nsobject = nsobject;
- auto callback = base::BindBlockArc(^{
+ auto callback = base::BindOnce(^{
[nsobject description];
});
}
diff --git a/chromium/base/mac/foundation_util.h b/chromium/base/mac/foundation_util.h
index abdfdf30e3d..26a2f18bc84 100644
--- a/chromium/base/mac/foundation_util.h
+++ b/chromium/base/mac/foundation_util.h
@@ -52,9 +52,6 @@ typedef CR_FORWARD_ENUM(unsigned int, NSSearchPathDirectory);
typedef unsigned int NSSearchPathDomainMask;
#endif
-typedef struct OpaqueSecTrustRef* SecACLRef;
-typedef struct OpaqueSecTrustedApplicationRef* SecTrustedApplicationRef;
-
#if defined(OS_IOS)
typedef struct CF_BRIDGED_TYPE(id) __SecKey* SecKeyRef;
typedef struct CF_BRIDGED_TYPE(id) __SecPolicy* SecPolicyRef;
@@ -311,10 +308,8 @@ CF_CAST_DECL(CTFont);
CF_CAST_DECL(CTFontDescriptor);
CF_CAST_DECL(CTRun);
-CF_CAST_DECL(SecACL);
CF_CAST_DECL(SecKey);
CF_CAST_DECL(SecPolicy);
-CF_CAST_DECL(SecTrustedApplication);
#undef CF_CAST_DECL
diff --git a/chromium/base/mac/mac_util.h b/chromium/base/mac/mac_util.h
index 37e5b670cee..f596efb1e01 100644
--- a/chromium/base/mac/mac_util.h
+++ b/chromium/base/mac/mac_util.h
@@ -153,6 +153,12 @@ DEFINE_IS_OS_FUNCS(13, TEST_DEPLOYMENT_TARGET)
DEFINE_IS_OS_FUNCS(13, IGNORE_DEPLOYMENT_TARGET)
#endif
+#ifdef MAC_OS_X_VERSION_10_14
+DEFINE_IS_OS_FUNCS(14, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(14, IGNORE_DEPLOYMENT_TARGET)
+#endif
+
#undef IGNORE_DEPLOYMENT_TARGET
#undef TEST_DEPLOYMENT_TARGET
#undef DEFINE_IS_OS_FUNCS
@@ -160,8 +166,8 @@ DEFINE_IS_OS_FUNCS(13, IGNORE_DEPLOYMENT_TARGET)
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
-inline bool IsOSLaterThan10_13_DontCallThis() {
- return !IsAtMostOS10_13();
+inline bool IsOSLaterThan10_14_DontCallThis() {
+ return !IsAtMostOS10_14();
}
// Retrieve the system's model identifier string from the IOKit registry:
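
The DEFINE_IS_OS_FUNCS(14, ...) addition generates the IsOS10_14()/IsAtMostOS10_14()/IsAtLeastOS10_14() family exercised by the unit test further down. A minimal caller sketch, assuming the existing base::mac namespace; the function name is hypothetical:

#include "base/mac/mac_util.h"

void ConfigureAppearance() {
  // Take the newer path only on macOS 10.14 (Mojave) or later.
  if (!base::mac::IsAtLeastOS10_14())
    return;
  // Hypothetical Mojave-only configuration goes here.
}
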
diff --git a/chromium/base/mac/mac_util.mm b/chromium/base/mac/mac_util.mm
index a8308be52a9..82b90470169 100644
--- a/chromium/base/mac/mac_util.mm
+++ b/chromium/base/mac/mac_util.mm
@@ -422,7 +422,7 @@ int MacOSXMinorVersionInternal() {
// immediate death.
CHECK(darwin_major_version >= 6);
int mac_os_x_minor_version = darwin_major_version - 4;
- DLOG_IF(WARNING, darwin_major_version > 17)
+ DLOG_IF(WARNING, darwin_major_version > 18)
<< "Assuming Darwin " << base::IntToString(darwin_major_version)
<< " is macOS 10." << base::IntToString(mac_os_x_minor_version);
diff --git a/chromium/base/mac/mac_util_unittest.mm b/chromium/base/mac/mac_util_unittest.mm
index 266d1c42947..6b13949b557 100644
--- a/chromium/base/mac/mac_util_unittest.mm
+++ b/chromium/base/mac/mac_util_unittest.mm
@@ -136,94 +136,176 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
int32_t major, minor, bugfix;
base::SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix);
+ // The patterns here are:
+ // - FALSE/FALSE/TRUE (it is not the earlier version, it is not "at most" the
+ // earlier version, it is "at least" the earlier version)
+ // - TRUE/TRUE/TRUE (it is the same version, it is "at most" the same version,
+ // it is "at least" the same version)
+ // - FALSE/TRUE/FALSE (it is not the later version, it is "at most" the later
+ // version, it is not "at least" the later version)
+
+ // TODO(avi): Is there a better way to test this? Maybe with macros?
+
if (major == 10) {
if (minor == 9) {
EXPECT_TRUE(IsOS10_9());
EXPECT_TRUE(IsAtMostOS10_9());
EXPECT_TRUE(IsAtLeastOS10_9());
+
EXPECT_FALSE(IsOS10_10());
EXPECT_TRUE(IsAtMostOS10_10());
EXPECT_FALSE(IsAtLeastOS10_10());
+
EXPECT_FALSE(IsOS10_11());
EXPECT_TRUE(IsAtMostOS10_11());
EXPECT_FALSE(IsAtLeastOS10_11());
+
EXPECT_FALSE(IsOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
EXPECT_TRUE(IsAtMostOS10_12());
+ EXPECT_FALSE(IsAtLeastOS10_12());
+
EXPECT_FALSE(IsOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+ EXPECT_FALSE(IsAtLeastOS10_13());
+
+ EXPECT_FALSE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_FALSE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
} else if (minor == 10) {
EXPECT_FALSE(IsOS10_9());
EXPECT_FALSE(IsAtMostOS10_9());
EXPECT_TRUE(IsAtLeastOS10_9());
+
EXPECT_TRUE(IsOS10_10());
EXPECT_TRUE(IsAtMostOS10_10());
EXPECT_TRUE(IsAtLeastOS10_10());
+
EXPECT_FALSE(IsOS10_11());
EXPECT_TRUE(IsAtMostOS10_11());
EXPECT_FALSE(IsAtLeastOS10_11());
+
EXPECT_FALSE(IsOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
EXPECT_TRUE(IsAtMostOS10_12());
+ EXPECT_FALSE(IsAtLeastOS10_12());
+
EXPECT_FALSE(IsOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+ EXPECT_FALSE(IsAtLeastOS10_13());
+
+ EXPECT_FALSE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_FALSE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
} else if (minor == 11) {
EXPECT_FALSE(IsOS10_9());
EXPECT_FALSE(IsAtMostOS10_9());
EXPECT_TRUE(IsAtLeastOS10_9());
+
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
EXPECT_TRUE(IsAtLeastOS10_10());
+
EXPECT_TRUE(IsOS10_11());
EXPECT_TRUE(IsAtMostOS10_11());
EXPECT_TRUE(IsAtLeastOS10_11());
+
EXPECT_FALSE(IsOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
EXPECT_TRUE(IsAtMostOS10_12());
+ EXPECT_FALSE(IsAtLeastOS10_12());
+
EXPECT_FALSE(IsOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+ EXPECT_FALSE(IsAtLeastOS10_13());
+
+ EXPECT_FALSE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_FALSE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
} else if (minor == 12) {
EXPECT_FALSE(IsOS10_9());
EXPECT_FALSE(IsAtMostOS10_9());
EXPECT_TRUE(IsAtLeastOS10_9());
+
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
EXPECT_TRUE(IsAtLeastOS10_10());
+
EXPECT_FALSE(IsOS10_11());
EXPECT_FALSE(IsAtMostOS10_11());
EXPECT_TRUE(IsAtLeastOS10_11());
+
EXPECT_TRUE(IsOS10_12());
EXPECT_TRUE(IsAtMostOS10_12());
EXPECT_TRUE(IsAtLeastOS10_12());
+
EXPECT_FALSE(IsOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+ EXPECT_FALSE(IsAtLeastOS10_13());
+
+ EXPECT_FALSE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_FALSE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
} else if (minor == 13) {
EXPECT_FALSE(IsOS10_9());
EXPECT_FALSE(IsAtMostOS10_9());
EXPECT_TRUE(IsAtLeastOS10_9());
+
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
EXPECT_TRUE(IsAtLeastOS10_10());
+
EXPECT_FALSE(IsOS10_11());
EXPECT_FALSE(IsAtMostOS10_11());
EXPECT_TRUE(IsAtLeastOS10_11());
+
EXPECT_FALSE(IsOS10_12());
EXPECT_FALSE(IsAtMostOS10_12());
EXPECT_TRUE(IsAtLeastOS10_12());
+
EXPECT_TRUE(IsOS10_13());
- EXPECT_TRUE(IsAtLeastOS10_13());
EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+ EXPECT_TRUE(IsAtLeastOS10_13());
+
+ EXPECT_FALSE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_FALSE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ } else if (minor == 14) {
+ EXPECT_FALSE(IsOS10_9());
+ EXPECT_FALSE(IsAtMostOS10_9());
+ EXPECT_TRUE(IsAtLeastOS10_9());
+
+ EXPECT_FALSE(IsOS10_10());
+ EXPECT_FALSE(IsAtMostOS10_10());
+ EXPECT_TRUE(IsAtLeastOS10_10());
+
+ EXPECT_FALSE(IsOS10_11());
+ EXPECT_FALSE(IsAtMostOS10_11());
+ EXPECT_TRUE(IsAtLeastOS10_11());
+
+ EXPECT_FALSE(IsOS10_12());
+ EXPECT_FALSE(IsAtMostOS10_12());
+ EXPECT_TRUE(IsAtLeastOS10_12());
+
+ EXPECT_FALSE(IsOS10_13());
+ EXPECT_FALSE(IsAtMostOS10_13());
+ EXPECT_TRUE(IsAtLeastOS10_13());
+
+ EXPECT_TRUE(IsOS10_14());
+ EXPECT_TRUE(IsAtMostOS10_14());
+ EXPECT_TRUE(IsAtLeastOS10_14());
+
+ EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
} else {
- // Not nine, ten, eleven, twelve, or thirteen. Ah, ah, ah.
+ // Not nine, ten, eleven, twelve, thirteen, or fourteen. Ah, ah, ah.
EXPECT_TRUE(false);
}
} else {
diff --git a/chromium/base/memory/discardable_shared_memory_unittest.cc b/chromium/base/memory/discardable_shared_memory_unittest.cc
index a7310a72530..b3d21a7bd57 100644
--- a/chromium/base/memory/discardable_shared_memory_unittest.cc
+++ b/chromium/base/memory/discardable_shared_memory_unittest.cc
@@ -436,7 +436,7 @@ TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
base::trace_event::MemoryDumpArgs args = {
base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
- trace_event::ProcessMemoryDump pmd(nullptr, args);
+ trace_event::ProcessMemoryDump pmd(args);
trace_event::MemoryAllocatorDump* client_dump =
pmd.CreateAllocatorDump("discardable_manager/map1");
const bool is_owned = false;
diff --git a/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc b/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
index ff464fb3c43..3f5f4b764f7 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
@@ -8,7 +8,7 @@
#include "base/bind_helpers.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
diff --git a/chromium/base/memory/memory_pressure_monitor_unittest.cc b/chromium/base/memory/memory_pressure_monitor_unittest.cc
index e9747418cc8..10d9d2428f6 100644
--- a/chromium/base/memory/memory_pressure_monitor_unittest.cc
+++ b/chromium/base/memory/memory_pressure_monitor_unittest.cc
@@ -6,7 +6,7 @@
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
diff --git a/chromium/base/memory/platform_shared_memory_region.h b/chromium/base/memory/platform_shared_memory_region.h
index 143a1d4ee2e..3d830b6f936 100644
--- a/chromium/base/memory/platform_shared_memory_region.h
+++ b/chromium/base/memory/platform_shared_memory_region.h
@@ -17,7 +17,7 @@
#include <mach/mach.h>
#include "base/mac/scoped_mach_port.h"
#elif defined(OS_FUCHSIA)
-#include "base/fuchsia/scoped_zx_handle.h"
+#include <lib/zx/vmo.h>
#elif defined(OS_WIN)
#include "base/win/scoped_handle.h"
#include "base/win/windows_types.h"
@@ -31,7 +31,7 @@ namespace base {
namespace subtle {
#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
- !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
+ !defined(OS_ANDROID)
// Helper structs to keep two descriptors on POSIX. It's needed to support
// ConvertToReadOnly().
struct BASE_EXPORT FDPair {
@@ -97,7 +97,7 @@ class BASE_EXPORT PlatformSharedMemoryRegion {
using ScopedPlatformHandle = mac::ScopedMachSendRight;
#elif defined(OS_FUCHSIA)
using PlatformHandle = zx_handle_t;
- using ScopedPlatformHandle = ScopedZxHandle;
+ using ScopedPlatformHandle = zx::vmo;
#elif defined(OS_WIN)
using PlatformHandle = HANDLE;
using ScopedPlatformHandle = win::ScopedHandle;
@@ -173,6 +173,12 @@ class BASE_EXPORT PlatformSharedMemoryRegion {
bool ConvertToReadOnly(void* mapped_addr);
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+ // Converts the region to unsafe. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode; all other modes will CHECK-fail. On success, the object
+ // is left in kUnsafe mode.
+ bool ConvertToUnsafe();
+
// Maps |size| bytes of the shared memory region starting with the given
// |offset| into the caller's address space. |offset| must be aligned to value
// of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
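
A minimal sketch of the new ConvertToUnsafe() flow, mirroring the unit tests added later in this patch; the helper function itself is hypothetical:

#include <stddef.h>

#include "base/memory/platform_shared_memory_region.h"

// Hypothetical helper: creates a region and converts it to kUnsafe mode.
base::subtle::PlatformSharedMemoryRegion MakeUnsafeRegion(size_t size) {
  using base::subtle::PlatformSharedMemoryRegion;
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(size);
  // Conversion is only legal from kWritable mode; read-only or already-unsafe
  // regions CHECK-fail, as the new death tests demonstrate.
  if (!region.IsValid() || !region.ConvertToUnsafe())
    return PlatformSharedMemoryRegion();
  return region;
}
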
diff --git a/chromium/base/memory/platform_shared_memory_region_android.cc b/chromium/base/memory/platform_shared_memory_region_android.cc
index 664d3d4b859..6c92b5e7016 100644
--- a/chromium/base/memory/platform_shared_memory_region_android.cc
+++ b/chromium/base/memory/platform_shared_memory_region_android.cc
@@ -100,6 +100,17 @@ bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
return true;
}
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
diff --git a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
index 5a75845f726..a3e195860a0 100644
--- a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
+++ b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -22,7 +22,7 @@ static constexpr int kNoWriteOrExec =
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
- ScopedZxHandle handle,
+ zx::vmo handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
@@ -56,9 +56,8 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
- ScopedZxHandle duped_handle;
- zx_status_t status = zx_handle_duplicate(handle_.get(), ZX_RIGHT_SAME_RIGHTS,
- duped_handle.receive());
+ zx::vmo duped_handle;
+ zx_status_t status = handle_.duplicate(ZX_RIGHT_SAME_RIGHTS, &duped_handle);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_duplicate";
return {};
@@ -75,21 +74,27 @@ bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
- ScopedZxHandle old_handle(handle_.release());
- ScopedZxHandle new_handle;
- zx_status_t status =
- zx_handle_replace(old_handle.get(), kNoWriteOrExec, new_handle.receive());
+ zx_status_t status = handle_.replace(kNoWriteOrExec, &handle_);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_replace";
return false;
}
- ignore_result(old_handle.release());
- handle_ = std::move(new_handle);
mode_ = Mode::kReadOnly;
return true;
}
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
@@ -133,21 +138,19 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
- ScopedZxHandle vmo;
- zx_status_t status = zx_vmo_create(rounded_size, 0, vmo.receive());
+ zx::vmo vmo;
+ zx_status_t status = zx::vmo::create(rounded_size, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmo_create";
return {};
}
const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
- ScopedZxHandle old_vmo(std::move(vmo));
- status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
+ status = vmo.replace(kNoExecFlags, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_replace";
return {};
}
- ignore_result(old_vmo.release());
return PlatformSharedMemoryRegion(std::move(vmo), mode, size,
UnguessableToken::Create());
@@ -180,7 +183,7 @@ bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
- ScopedZxHandle handle,
+ zx::vmo handle,
Mode mode,
size_t size,
const UnguessableToken& guid)
diff --git a/chromium/base/memory/platform_shared_memory_region_mac.cc b/chromium/base/memory/platform_shared_memory_region_mac.cc
index b4d12baea47..4a8b440c266 100644
--- a/chromium/base/memory/platform_shared_memory_region_mac.cc
+++ b/chromium/base/memory/platform_shared_memory_region_mac.cc
@@ -113,6 +113,17 @@ bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
return true;
}
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
diff --git a/chromium/base/memory/platform_shared_memory_region_posix.cc b/chromium/base/memory/platform_shared_memory_region_posix.cc
index 8453c12eb5c..d4b6d5c00c1 100644
--- a/chromium/base/memory/platform_shared_memory_region_posix.cc
+++ b/chromium/base/memory/platform_shared_memory_region_posix.cc
@@ -31,6 +31,7 @@ struct ScopedPathUnlinkerTraits {
using ScopedPathUnlinker =
ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+#if !defined(OS_NACL)
bool CheckFDAccessMode(int fd, int expected_mode) {
int fd_status = fcntl(fd, F_GETFL);
if (fd_status == -1) {
@@ -47,6 +48,7 @@ bool CheckFDAccessMode(int fd, int expected_mode) {
return true;
}
+#endif // !defined(OS_NACL)
} // namespace
@@ -145,6 +147,18 @@ bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
return true;
}
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ handle_.readonly_fd.reset();
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
@@ -161,7 +175,7 @@ bool PlatformSharedMemoryRegion::MapAt(off_t offset,
*memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
MAP_SHARED, handle_.fd.get(), offset);
- bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+ bool mmap_succeeded = *memory && *memory != MAP_FAILED;
if (!mmap_succeeded) {
DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
return false;
@@ -263,6 +277,7 @@ bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
+#if !defined(OS_NACL)
if (!CheckFDAccessMode(handle.fd,
mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
return false;
@@ -278,6 +293,28 @@ bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
}
return true;
+#else
+ // fcntl(_, F_GETFL) is not implemented on NaCl.
+ void* temp_memory = nullptr;
+ temp_memory =
+ mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, handle.fd, 0);
+
+ bool mmap_succeeded = temp_memory && temp_memory != MAP_FAILED;
+ if (mmap_succeeded)
+ munmap(temp_memory, size);
+
+ bool is_read_only = !mmap_succeeded;
+ bool expected_read_only = mode == Mode::kReadOnly;
+
+ if (is_read_only != expected_read_only) {
+ DLOG(ERROR) << "Descriptor has a wrong access mode: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
+ return false;
+ }
+
+ return true;
+#endif // !defined(OS_NACL)
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
diff --git a/chromium/base/memory/platform_shared_memory_region_unittest.cc b/chromium/base/memory/platform_shared_memory_region_unittest.cc
index df3e526adfd..5a83ee9b468 100644
--- a/chromium/base/memory/platform_shared_memory_region_unittest.cc
+++ b/chromium/base/memory/platform_shared_memory_region_unittest.cc
@@ -75,6 +75,26 @@ TEST_F(PlatformSharedMemoryRegionTest, ReportedSizeIsRequestedSize) {
}
}
+// Tests that a writable region can be converted to read-only.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertWritableToReadOnly) {
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kWritable);
+ ASSERT_TRUE(region.ConvertToReadOnly());
+ EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
+}
+
+// Tests that a writable region can be converted to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertWritableToUnsafe) {
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kWritable);
+ ASSERT_TRUE(region.ConvertToUnsafe());
+ EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kUnsafe);
+}
+
// Tests that the platform-specific handle converted to read-only cannot be used
// to perform a writable mapping with low-level system APIs like mmap().
TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyHandleIsNotWritable) {
@@ -155,8 +175,7 @@ TEST_F(PlatformSharedMemoryRegionTest, MapAtWithOverflowTest) {
EXPECT_FALSE(mapping.IsValid());
}
-#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA) && \
- !defined(OS_MACOSX)
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
// Tests that the second handle is closed after a conversion to read-only on
// POSIX.
TEST_F(PlatformSharedMemoryRegionTest,
@@ -168,6 +187,17 @@ TEST_F(PlatformSharedMemoryRegionTest,
FDPair fds = region.GetPlatformHandle();
EXPECT_LT(fds.readonly_fd, 0);
}
+
+// Tests that the second handle is closed after a conversion to unsafe on
+// POSIX.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertToUnsafeInvalidatesSecondHandle) {
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ ASSERT_TRUE(region.ConvertToUnsafe());
+ FDPair fds = region.GetPlatformHandle();
+ EXPECT_LT(fds.readonly_fd, 0);
+}
#endif
#if defined(OS_MACOSX) && !defined(OS_IOS)
@@ -284,5 +314,34 @@ TEST_F(PlatformSharedMemoryRegionTest,
EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
}
+// Tests that it's prohibited to convert a read-only region to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyRegionConvertToUnsafeDeathTest) {
+#ifdef OFFICIAL_BUILD
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Only writable shared memory region can be converted to unsafe";
+#endif
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ ASSERT_TRUE(region.ConvertToReadOnly());
+ EXPECT_DEATH_IF_SUPPORTED(region.ConvertToUnsafe(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert an unsafe region to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToUnsafeDeathTest) {
+#ifdef OFFICIAL_BUILD
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Only writable shared memory region can be converted to unsafe";
+#endif
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_DEATH_IF_SUPPORTED(region.ConvertToUnsafe(), kErrorRegex);
+}
+
} // namespace subtle
} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_win.cc b/chromium/base/memory/platform_shared_memory_region_win.cc
index b6608da02f0..034664154e7 100644
--- a/chromium/base/memory/platform_shared_memory_region_win.cc
+++ b/chromium/base/memory/platform_shared_memory_region_win.cc
@@ -206,6 +206,17 @@ bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
return true;
}
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
diff --git a/chromium/base/memory/read_only_shared_memory_region.h b/chromium/base/memory/read_only_shared_memory_region.h
index 54e73edcfcb..4f92762d5e5 100644
--- a/chromium/base/memory/read_only_shared_memory_region.h
+++ b/chromium/base/memory/read_only_shared_memory_region.h
@@ -89,6 +89,12 @@ class BASE_EXPORT ReadOnlySharedMemoryRegion {
return handle_.GetSize();
}
+ // Returns 128-bit GUID of the region.
+ const UnguessableToken& GetGUID() const {
+ DCHECK(IsValid());
+ return handle_.GetGUID();
+ }
+
private:
explicit ReadOnlySharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
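
The new GetGUID() accessor exposes the region's UnguessableToken, which the Duplicate test below checks is preserved across duplication. A hypothetical helper sketch:

#include "base/memory/read_only_shared_memory_region.h"

// Returns true when both handles refer to the same underlying region,
// identified by the 128-bit GUID added in this patch.
bool ReferToSameRegion(const base::ReadOnlySharedMemoryRegion& a,
                       const base::ReadOnlySharedMemoryRegion& b) {
  return a.IsValid() && b.IsValid() && a.GetGUID() == b.GetGUID();
}
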
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index d88fc5409a8..df1c30f843d 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -154,6 +154,25 @@ class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
~InitialRefCountIsOne() = default;
};
+// Checks that the scoped_refptr is null before the reference counted object is
+// destroyed.
+class CheckRefptrNull : public base::RefCounted<CheckRefptrNull> {
+ public:
+ // Set the last scoped_refptr that will have a reference to this object.
+ void set_scoped_refptr(scoped_refptr<CheckRefptrNull>* ptr) { ptr_ = ptr; }
+
+ protected:
+ virtual ~CheckRefptrNull() {
+ EXPECT_NE(ptr_, nullptr);
+ EXPECT_EQ(ptr_->get(), nullptr);
+ }
+
+ private:
+ friend class base::RefCounted<CheckRefptrNull>;
+
+ scoped_refptr<CheckRefptrNull>* ptr_ = nullptr;
+};
+
} // end namespace
TEST(RefCountedUnitTest, TestSelfAssignment) {
@@ -559,27 +578,80 @@ TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
TEST(RefCountedUnitTest, TestMakeRefCounted) {
scoped_refptr<Derived> derived = new Derived;
EXPECT_TRUE(derived->HasOneRef());
- derived = nullptr;
+ derived.reset();
scoped_refptr<Derived> derived2 = base::MakeRefCounted<Derived>();
EXPECT_TRUE(derived2->HasOneRef());
- derived2 = nullptr;
+ derived2.reset();
}
TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
scoped_refptr<InitialRefCountIsOne> obj =
base::MakeRefCounted<InitialRefCountIsOne>();
EXPECT_TRUE(obj->HasOneRef());
- obj = nullptr;
+ obj.reset();
scoped_refptr<InitialRefCountIsOne> obj2 =
base::AdoptRef(new InitialRefCountIsOne);
EXPECT_TRUE(obj2->HasOneRef());
- obj2 = nullptr;
+ obj2.reset();
scoped_refptr<Other> obj3 = base::MakeRefCounted<Other>();
EXPECT_TRUE(obj3->HasOneRef());
- obj3 = nullptr;
+ obj3.reset();
+}
+
+TEST(RefCountedUnitTest, TestPrivateDestructorWithDeleter) {
+ // Ensure that RefCounted doesn't need the access to the pointee dtor when
+ // a custom deleter is given.
+ scoped_refptr<HasPrivateDestructorWithDeleter> obj =
+ base::MakeRefCounted<HasPrivateDestructorWithDeleter>();
+}
+
+TEST(RefCountedUnitTest, TestReset) {
+ ScopedRefPtrCountBase::reset_count();
+
+ // Create ScopedRefPtrCountBase that is referenced by |obj1| and |obj2|.
+ scoped_refptr<ScopedRefPtrCountBase> obj1 =
+ base::MakeRefCounted<ScopedRefPtrCountBase>();
+ scoped_refptr<ScopedRefPtrCountBase> obj2 = obj1;
+ EXPECT_NE(obj1.get(), nullptr);
+ EXPECT_NE(obj2.get(), nullptr);
+ EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+ EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 0);
+
+ // Check that calling reset() on |obj1| resets it. |obj2| still has a
+ // reference to the ScopedRefPtrCountBase so it shouldn't be reset.
+ obj1.reset();
+ EXPECT_EQ(obj1.get(), nullptr);
+ EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+ EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 0);
+
+ // Check that calling reset() on |obj2| resets it and causes the deletion of
+ // the ScopedRefPtrCountBase.
+ obj2.reset();
+ EXPECT_EQ(obj2.get(), nullptr);
+ EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+ EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 1);
+}
+
+TEST(RefCountedUnitTest, TestResetAlreadyNull) {
+ // Check that calling reset() on a null scoped_refptr does nothing.
+ scoped_refptr<ScopedRefPtrCountBase> obj;
+ obj.reset();
+ // |obj| should still be null after calling reset().
+ EXPECT_EQ(obj.get(), nullptr);
+}
+
+TEST(RefCountedUnitTest, CheckScopedRefptrNullBeforeObjectDestruction) {
+ scoped_refptr<CheckRefptrNull> obj = base::MakeRefCounted<CheckRefptrNull>();
+ obj->set_scoped_refptr(&obj);
+
+ // Check that when reset() is called the scoped_refptr internal pointer is set
+ // to null before the reference counted object is destroyed. This check is
+ // done by the CheckRefptrNull destructor.
+ obj.reset();
+ EXPECT_EQ(obj.get(), nullptr);
}
TEST(RefCountedDeathTest, TestAdoptRef) {
@@ -597,10 +669,3 @@ TEST(RefCountedDeathTest, TestAdoptRef) {
base::MakeRefCounted<InitialRefCountIsOne>();
EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
}
-
-TEST(RefCountedUnitTest, TestPrivateDestructorWithDeleter) {
- // Ensure that RefCounted doesn't need the access to the pointee dtor when
- // a custom deleter is given.
- scoped_refptr<HasPrivateDestructorWithDeleter> obj =
- base::MakeRefCounted<HasPrivateDestructorWithDeleter>();
-}
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
index a2576170bf9..389d0cbf628 100644
--- a/chromium/base/memory/scoped_refptr.h
+++ b/chromium/base/memory/scoped_refptr.h
@@ -123,7 +123,7 @@ scoped_refptr<T> WrapRefCounted(T* t) {
// void some_other_function() {
// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// ...
-// foo = nullptr; // explicitly releases |foo|
+// foo.reset(); // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
@@ -228,6 +228,10 @@ class scoped_refptr {
return *this;
}
+ // Sets managed object to null and releases reference to the previous managed
+ // object, if it existed.
+ void reset() { scoped_refptr().swap(*this); }
+
void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
explicit operator bool() const { return ptr_ != nullptr; }
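
The new scoped_refptr::reset() replaces the |foo = nullptr| idiom in the header comment above. A minimal sketch using a hypothetical ref-counted type:

#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"

// Hypothetical ref-counted type used only for this sketch.
class Foo : public base::RefCounted<Foo> {
 private:
  friend class base::RefCounted<Foo>;
  ~Foo() = default;
};

void DropReference() {
  scoped_refptr<Foo> foo = base::MakeRefCounted<Foo>();
  // reset() releases the reference and leaves |foo| null, replacing the
  // older |foo = nullptr| spelling.
  foo.reset();
  DCHECK(!foo);
}
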
diff --git a/chromium/base/memory/shared_memory_fuchsia.cc b/chromium/base/memory/shared_memory_fuchsia.cc
index 4036bf6f2e1..c22f22b87e4 100644
--- a/chromium/base/memory/shared_memory_fuchsia.cc
+++ b/chromium/base/memory/shared_memory_fuchsia.cc
@@ -6,13 +6,12 @@
#include <limits>
-#include <zircon/process.h>
+#include <lib/zx/vmar.h>
+#include <lib/zx/vmo.h>
#include <zircon/rights.h>
-#include <zircon/syscalls.h>
#include "base/bits.h"
-#include "base/fuchsia/scoped_zx_handle.h"
-#include "base/logging.h"
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
@@ -53,24 +52,21 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
requested_size_ = options.size;
mapped_size_ = bits::Align(requested_size_, GetPageSize());
- ScopedZxHandle vmo;
- zx_status_t status = zx_vmo_create(mapped_size_, 0, vmo.receive());
+ zx::vmo vmo;
+ zx_status_t status = zx::vmo::create(mapped_size_, 0, &vmo);
if (status != ZX_OK) {
- DLOG(ERROR) << "zx_vmo_create failed, status=" << status;
+ ZX_DLOG(ERROR, status) << "zx_vmo_create";
return false;
}
if (!options.executable) {
// If options.executable isn't set, drop that permission by replacement.
const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
- ScopedZxHandle old_vmo(std::move(vmo));
- status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
+ status = vmo.replace(kNoExecFlags, &vmo);
if (status != ZX_OK) {
- DLOG(ERROR) << "zx_handle_replace() failed: "
- << zx_status_get_string(status);
+ ZX_DLOG(ERROR, status) << "zx_handle_replace";
return false;
}
- ignore_result(old_vmo.release());
}
shm_ = SharedMemoryHandle(vmo.release(), mapped_size_,
@@ -92,10 +88,10 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
if (!read_only_)
flags |= ZX_VM_FLAG_PERM_WRITE;
uintptr_t addr;
- zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, shm_.GetHandle(),
- offset, bytes, flags, &addr);
+ zx_status_t status = zx::vmar::root_self()->map(
+ 0, *zx::unowned_vmo(shm_.GetHandle()), offset, bytes, flags, &addr);
if (status != ZX_OK) {
- DLOG(ERROR) << "zx_vmar_map failed, status=" << status;
+ ZX_DLOG(ERROR, status) << "zx_vmar_map";
return false;
}
memory_ = reinterpret_cast<void*>(addr);
@@ -113,9 +109,9 @@ bool SharedMemory::Unmap() {
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
- zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), addr, mapped_size_);
+ zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
if (status != ZX_OK) {
- DLOG(ERROR) << "zx_vmar_unmap failed, status=" << status;
+ ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
return false;
}
@@ -149,16 +145,17 @@ SharedMemoryHandle SharedMemory::DuplicateHandle(
}
SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- zx_handle_t duped_handle;
+ zx::vmo duped_handle;
const int kNoWriteOrExec =
ZX_DEFAULT_VMO_RIGHTS &
~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
- zx_status_t status =
- zx_handle_duplicate(shm_.GetHandle(), kNoWriteOrExec, &duped_handle);
+ zx_status_t status = zx::unowned_vmo(shm_.GetHandle())
+ ->duplicate(kNoWriteOrExec, &duped_handle);
if (status != ZX_OK)
return SharedMemoryHandle();
- SharedMemoryHandle handle(duped_handle, shm_.GetSize(), shm_.GetGUID());
+ SharedMemoryHandle handle(duped_handle.release(), shm_.GetSize(),
+ shm_.GetGUID());
handle.SetOwnershipPassesToIPC(true);
return handle;
}
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index ae143af2b3f..dd3d47aa0cf 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -19,9 +19,11 @@
#include "base/file_descriptor_posix.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
#include <sys/types.h>
#include "base/file_descriptor_posix.h"
+#elif defined(OS_FUCHSIA)
+#include <zircon/types.h>
#endif
namespace base {
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index d3163e5a640..e1289e7e1da 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -164,13 +164,6 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
}
}
- if (fd.is_valid()) {
- // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
- if (!fdopen(fd.get(), "a+")) {
- PLOG(ERROR) << "Creating file stream in " << path.value() << " failed";
- return false;
- }
- }
}
if (fd.is_valid() && fix_size) {
// Get current size.
diff --git a/chromium/base/memory/shared_memory_region_unittest.cc b/chromium/base/memory/shared_memory_region_unittest.cc
index fcecb1fa221..78ac6308982 100644
--- a/chromium/base/memory/shared_memory_region_unittest.cc
+++ b/chromium/base/memory/shared_memory_region_unittest.cc
@@ -211,6 +211,7 @@ TYPED_TEST_CASE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
TYPED_TEST(DuplicatableSharedMemoryRegionTest, Duplicate) {
TypeParam dup_region = this->region_.Duplicate();
+ EXPECT_EQ(this->region_.GetGUID(), dup_region.GetGUID());
typename TypeParam::MappingType mapping = dup_region.Map();
ASSERT_TRUE(mapping.IsValid());
EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index b754540741a..dd76a86a1ce 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -52,9 +52,8 @@
#endif
#if defined(OS_FUCHSIA)
-#include <zircon/process.h>
-#include <zircon/syscalls.h>
-#include "base/fuchsia/scoped_zx_handle.h"
+#include <lib/zx/vmar.h>
+#include <lib/zx/vmo.h>
#endif
namespace base {
@@ -418,17 +417,18 @@ TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
(void)handle;
#elif defined(OS_FUCHSIA)
uintptr_t addr;
- EXPECT_NE(ZX_OK, zx_vmar_map(zx_vmar_root_self(), 0, handle.GetHandle(), 0,
- contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
+ EXPECT_NE(ZX_OK, zx::vmar::root_self()->map(
+ 0, *zx::unowned_vmo(handle.GetHandle()), 0,
+ contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
<< "Shouldn't be able to map as writable.";
- ScopedZxHandle duped_handle;
- EXPECT_NE(ZX_OK, zx_handle_duplicate(handle.GetHandle(), ZX_RIGHT_WRITE,
- duped_handle.receive()))
+ zx::vmo duped_handle;
+ EXPECT_NE(ZX_OK, zx::unowned_vmo(handle.GetHandle())
+ ->duplicate(ZX_RIGHT_WRITE, &duped_handle))
<< "Shouldn't be able to duplicate the handle into a writable one.";
- EXPECT_EQ(ZX_OK, zx_handle_duplicate(handle.GetHandle(), ZX_RIGHT_READ,
- duped_handle.receive()))
+ EXPECT_EQ(ZX_OK, zx::unowned_vmo(handle.GetHandle())
+ ->duplicate(ZX_RIGHT_READ, &duped_handle))
<< "Should be able to duplicate the handle into a readable one.";
#elif defined(OS_POSIX)
int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
@@ -588,6 +588,15 @@ TEST_P(SharedMemoryTest, MapTwice) {
#if !defined(OS_IOS)
// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
TEST_P(SharedMemoryTest, AnonymousExecutable) {
+#if defined(OS_LINUX)
+ // On Chromecast both /dev/shm and /tmp are mounted with 'noexec' option,
+ // which makes this test fail. But Chromecast doesn't use NaCL so we don't
+ // need this.
+ if (!IsPathExecutable(FilePath("/dev/shm")) &&
+ !IsPathExecutable(FilePath("/tmp"))) {
+ return;
+ }
+#endif // OS_LINUX
const uint32_t kTestSize = 1 << 16;
SharedMemory shared_memory;
diff --git a/chromium/base/memory/unsafe_shared_memory_region.h b/chromium/base/memory/unsafe_shared_memory_region.h
index d77eaaae471..ea637cd51b0 100644
--- a/chromium/base/memory/unsafe_shared_memory_region.h
+++ b/chromium/base/memory/unsafe_shared_memory_region.h
@@ -88,6 +88,12 @@ class BASE_EXPORT UnsafeSharedMemoryRegion {
return handle_.GetSize();
}
+ // Returns 128-bit GUID of the region.
+ const UnguessableToken& GetGUID() const {
+ DCHECK(IsValid());
+ return handle_.GetGUID();
+ }
+
private:
FRIEND_TEST_ALL_PREFIXES(DiscardableSharedMemoryTest,
LockShouldFailIfPlatformLockPagesFails);
diff --git a/chromium/base/memory/weak_ptr.cc b/chromium/base/memory/weak_ptr.cc
index d2a7d89e566..c993fcb8a13 100644
--- a/chromium/base/memory/weak_ptr.cc
+++ b/chromium/base/memory/weak_ptr.cc
@@ -7,25 +7,31 @@
namespace base {
namespace internal {
-WeakReference::Flag::Flag() : is_valid_(true) {
+WeakReference::Flag::Flag() {
// Flags only become bound when checked for validity, or invalidated,
// so that we can check that later validity/invalidation operations on
// the same Flag take place on the same sequenced thread.
- sequence_checker_.DetachFromSequence();
+ DETACH_FROM_SEQUENCE(sequence_checker_);
}
void WeakReference::Flag::Invalidate() {
// The flag being invalidated with a single ref implies that there are no
// weak pointers in existence. Allow deletion on other thread in this case.
+#if DCHECK_IS_ON()
DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
<< "WeakPtrs must be invalidated on the same sequenced thread.";
- is_valid_ = false;
+#endif
+ invalidated_.Set();
}
bool WeakReference::Flag::IsValid() const {
- DCHECK(sequence_checker_.CalledOnValidSequence())
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_)
<< "WeakPtrs must be checked on the same sequenced thread.";
- return is_valid_;
+ return !invalidated_.IsSet();
+}
+
+bool WeakReference::Flag::MaybeValid() const {
+ return !invalidated_.IsSet();
}
WeakReference::Flag::~Flag() = default;
@@ -40,10 +46,14 @@ WeakReference::WeakReference(WeakReference&& other) = default;
WeakReference::WeakReference(const WeakReference& other) = default;
-bool WeakReference::is_valid() const {
+bool WeakReference::IsValid() const {
return flag_ && flag_->IsValid();
}
+bool WeakReference::MaybeValid() const {
+ return flag_ && flag_->MaybeValid();
+}
+
WeakReferenceOwner::WeakReferenceOwner() = default;
WeakReferenceOwner::~WeakReferenceOwner() {
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index 34e7d2e3584..8228b2b8fbd 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -48,13 +48,13 @@
// ------------------------- IMPORTANT: Thread-safety -------------------------
-// Weak pointers may be passed safely between threads, but must always be
+// Weak pointers may be passed safely between sequences, but must always be
// dereferenced and invalidated on the same SequencedTaskRunner otherwise
// checking the pointer would be racey.
//
// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
// is dereferenced, the factory and its WeakPtrs become bound to the calling
-// thread or current SequencedWorkerPool token, and cannot be dereferenced or
+// sequence or current SequencedWorkerPool token, and cannot be dereferenced or
// invalidated on any other task runner. Bound WeakPtrs can still be handed
// off to other task runners, e.g. to use to post tasks back to object on the
// bound sequence.
@@ -64,8 +64,8 @@
// destroyed, or new WeakPtr objects may be used, from a different sequence.
//
// Thus, at least one WeakPtr object must exist and have been dereferenced on
-// the correct thread to enforce that other WeakPtr objects will enforce they
-// are used on the desired thread.
+// the correct sequence to enforce that other WeakPtr objects will enforce they
+// are used on the desired sequence.
#ifndef BASE_MEMORY_WEAK_PTR_H_
#define BASE_MEMORY_WEAK_PTR_H_
@@ -78,6 +78,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"
+#include "base/synchronization/atomic_flag.h"
namespace base {
@@ -99,13 +100,15 @@ class BASE_EXPORT WeakReference {
void Invalidate();
bool IsValid() const;
+ bool MaybeValid() const;
+
private:
friend class base::RefCountedThreadSafe<Flag>;
~Flag();
- SequenceChecker sequence_checker_;
- bool is_valid_;
+ SEQUENCE_CHECKER(sequence_checker_);
+ AtomicFlag invalidated_;
};
WeakReference();
@@ -117,7 +120,8 @@ class BASE_EXPORT WeakReference {
WeakReference& operator=(WeakReference&& other) = default;
WeakReference& operator=(const WeakReference& other) = default;
- bool is_valid() const;
+ bool IsValid() const;
+ bool MaybeValid() const;
private:
scoped_refptr<const Flag> flag_;
@@ -240,7 +244,7 @@ class WeakPtr : public internal::WeakPtrBase {
}
T* get() const {
- return ref_.is_valid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+ return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
}
T& operator*() const {
@@ -255,6 +259,15 @@ class WeakPtr : public internal::WeakPtrBase {
// Allow conditionals to test validity, e.g. if (weak_ptr) {...};
explicit operator bool() const { return get() != nullptr; }
+ // Returns false if the WeakPtr is confirmed to be invalid. This call is safe
+ // to make from any thread, e.g. to optimize away unnecessary work, but
+ // operator bool() must always be called, on the correct sequence, before
+ // actually using the pointer.
+ //
+ // Warning: as with any object, this call is only thread-safe if the WeakPtr
+ // instance isn't being re-assigned or reset() racily with this call.
+ bool MaybeValid() const { return ref_.MaybeValid(); }
+
private:
friend class internal::SupportsWeakPtrBase;
template <typename U> friend class WeakPtr;
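A small usage sketch of the MaybeValid() contract documented above; the Consumer type, the value computed, and the function name are hypothetical and only for illustration:

// Illustrative sketch: a worker may use MaybeValid() as a cheap cross-thread
// hint to skip work, but must still check operator bool on the bound sequence
// before dereferencing. Not part of this patch.
#include "base/bind.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/sequenced_task_runner.h"

struct Consumer {  // Hypothetical consumer type.
  void OnResult(int value) {}
};

void PrepareReplyOnWorker(base::WeakPtr<Consumer> consumer,
                          scoped_refptr<base::SequencedTaskRunner> reply_runner) {
  // Cheap hint only: skip the expensive work if the consumer is already gone.
  if (!consumer.MaybeValid())
    return;
  int value = 42;  // Stand-in for expensive preparation work.
  reply_runner->PostTask(
      FROM_HERE,
      base::BindOnce(
          [](base::WeakPtr<Consumer> consumer, int value) {
            // Authoritative check, on the sequence the WeakPtr is bound to.
            if (consumer)
              consumer->OnResult(value);
          },
          consumer, value));
}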
diff --git a/chromium/base/memory/weak_ptr_unittest.cc b/chromium/base/memory/weak_ptr_unittest.cc
index f8dfb7c0f0d..e15d167aa54 100644
--- a/chromium/base/memory/weak_ptr_unittest.cc
+++ b/chromium/base/memory/weak_ptr_unittest.cc
@@ -13,6 +13,7 @@
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/gtest_util.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -391,6 +392,42 @@ TEST(WeakPtrTest, InvalidateWeakPtrs) {
EXPECT_FALSE(factory.HasWeakPtrs());
}
+TEST(WeakPtrTest, MaybeValidOnSameSequence) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ EXPECT_TRUE(ptr.MaybeValid());
+ factory.InvalidateWeakPtrs();
+ // Since InvalidateWeakPtrs() ran on this sequence, MaybeValid() should be
+ // false.
+ EXPECT_FALSE(ptr.MaybeValid());
+}
+
+TEST(WeakPtrTest, MaybeValidOnOtherSequence) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ EXPECT_TRUE(ptr.MaybeValid());
+
+ base::Thread other_thread("other_thread");
+ other_thread.StartAndWaitForTesting();
+ other_thread.task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ [](WeakPtr<int> ptr) {
+ // Check that MaybeValid() _eventually_ returns false.
+ const TimeDelta timeout = TestTimeouts::tiny_timeout();
+ const TimeTicks begin = TimeTicks::Now();
+ while (ptr.MaybeValid() && (TimeTicks::Now() - begin) < timeout)
+ PlatformThread::YieldCurrentThread();
+ EXPECT_FALSE(ptr.MaybeValid());
+ },
+ ptr));
+ factory.InvalidateWeakPtrs();
+ // |other_thread|'s destructor will join, ensuring we wait for the task to be
+ // run.
+}
+
TEST(WeakPtrTest, HasWeakPtrs) {
int data;
WeakPtrFactory<int> factory(&data);
diff --git a/chromium/base/memory/writable_shared_memory_region.cc b/chromium/base/memory/writable_shared_memory_region.cc
index 0806c3782da..063e6720c4c 100644
--- a/chromium/base/memory/writable_shared_memory_region.cc
+++ b/chromium/base/memory/writable_shared_memory_region.cc
@@ -42,6 +42,15 @@ ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
return ReadOnlySharedMemoryRegion::Deserialize(std::move(handle));
}
+UnsafeSharedMemoryRegion WritableSharedMemoryRegion::ConvertToUnsafe(
+ WritableSharedMemoryRegion region) {
+ subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
+ if (!handle.ConvertToUnsafe())
+ return {};
+
+ return UnsafeSharedMemoryRegion::Deserialize(std::move(handle));
+}
+
WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
WritableSharedMemoryRegion::WritableSharedMemoryRegion(
WritableSharedMemoryRegion&& region) = default;
diff --git a/chromium/base/memory/writable_shared_memory_region.h b/chromium/base/memory/writable_shared_memory_region.h
index b953a10038d..f656db1ef42 100644
--- a/chromium/base/memory/writable_shared_memory_region.h
+++ b/chromium/base/memory/writable_shared_memory_region.h
@@ -9,6 +9,7 @@
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
namespace base {
@@ -50,6 +51,11 @@ class BASE_EXPORT WritableSharedMemoryRegion {
static ReadOnlySharedMemoryRegion ConvertToReadOnly(
WritableSharedMemoryRegion region);
+ // Makes the region unsafe. The region cannot be converted to read-only after
+ // this call. Returns an invalid region on failure.
+ static UnsafeSharedMemoryRegion ConvertToUnsafe(
+ WritableSharedMemoryRegion region);
+
// Default constructor initializes an invalid instance.
WritableSharedMemoryRegion();
@@ -83,6 +89,12 @@ class BASE_EXPORT WritableSharedMemoryRegion {
return handle_.GetSize();
}
+ // Returns 128-bit GUID of the region.
+ const UnguessableToken& GetGUID() const {
+ DCHECK(IsValid());
+ return handle_.GetGUID();
+ }
+
private:
explicit WritableSharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
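A brief usage sketch of the two additions above (ConvertToUnsafe() and GetGUID()); illustrative only, not part of the patch:

// Illustrative sketch: convert a writable region to an unsafe one; the GUID
// identifies the region across the conversion. Returns an invalid region on
// failure.
#include "base/logging.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/unguessable_token.h"

void ConvertExample() {
  base::WritableSharedMemoryRegion writable =
      base::WritableSharedMemoryRegion::Create(4096);
  const base::UnguessableToken guid = writable.GetGUID();
  base::UnsafeSharedMemoryRegion unsafe =
      base::WritableSharedMemoryRegion::ConvertToUnsafe(std::move(writable));
  if (unsafe.IsValid())
    DCHECK_EQ(guid, unsafe.GetGUID());
}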
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index 9f5f855a4b8..c05c38fa8b4 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -7,8 +7,10 @@
#include <limits>
#include <utility>
+#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/location.h"
-#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram_macros.h"
#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -24,19 +26,6 @@ namespace {
constexpr TimeDelta kTaskDelayWarningThreshold = TimeDelta::FromDays(14);
#endif
-// Returns true if MessagePump::ScheduleWork() must be called one
-// time for every task that is added to the MessageLoop incoming queue.
-bool AlwaysNotifyPump(MessageLoop::Type type) {
-#if defined(OS_ANDROID)
- // The Android UI message loop needs to get notified each time a task is
- // added
- // to the incoming queue.
- return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
-#else
- return false;
-#endif
-}
-
TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
TimeTicks delayed_run_time;
if (delay > TimeDelta())
@@ -48,17 +37,17 @@ TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
} // namespace
-IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
- : always_schedule_work_(AlwaysNotifyPump(message_loop->type())),
- triage_tasks_(this),
- delayed_tasks_(this),
- deferred_tasks_(this),
- message_loop_(message_loop) {
- // The constructing sequence is not necessarily the running sequence in the
- // case of base::Thread.
+IncomingTaskQueue::IncomingTaskQueue(
+ std::unique_ptr<Observer> task_queue_observer)
+ : task_queue_observer_(std::move(task_queue_observer)),
+ triage_tasks_(this) {
+ // The constructing sequence is not necessarily the running sequence, e.g. in
+ // the case of a MessageLoop created unbound.
DETACH_FROM_SEQUENCE(sequence_checker_);
}
+IncomingTaskQueue::~IncomingTaskQueue() = default;
+
bool IncomingTaskQueue::AddToIncomingQueue(const Location& from_here,
OnceClosure task,
TimeDelta delay,
@@ -82,46 +71,19 @@ bool IncomingTaskQueue::AddToIncomingQueue(const Location& from_here,
pending_task.is_high_res = true;
}
#endif
- return PostPendingTask(&pending_task);
-}
-
-void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
- {
- AutoLock auto_lock(incoming_queue_lock_);
- accept_new_tasks_ = false;
- }
- {
- AutoLock auto_lock(message_loop_lock_);
- message_loop_ = nullptr;
- }
-}
-void IncomingTaskQueue::StartScheduling() {
- bool schedule_work;
- {
- AutoLock lock(incoming_queue_lock_);
- DCHECK(!is_ready_for_scheduling_);
- DCHECK(!message_loop_scheduled_);
- is_ready_for_scheduling_ = true;
- schedule_work = !incoming_queue_.empty();
- if (schedule_work)
- message_loop_scheduled_ = true;
- }
- if (schedule_work) {
- DCHECK(message_loop_);
- AutoLock auto_lock(message_loop_lock_);
- message_loop_->ScheduleWork();
- }
+ return PostPendingTask(&pending_task);
}
-IncomingTaskQueue::~IncomingTaskQueue() {
- // Verify that WillDestroyCurrentMessageLoop() has been called.
- DCHECK(!message_loop_);
+void IncomingTaskQueue::Shutdown() {
+ AutoLock auto_lock(incoming_queue_lock_);
+ accept_new_tasks_ = false;
}
-void IncomingTaskQueue::RunTask(PendingTask* pending_task) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
+void IncomingTaskQueue::ReportMetricsOnIdle() const {
+ UMA_HISTOGRAM_COUNTS_1M(
+ "MessageLoop.DelayedTaskQueueForUI.PendingTasksCountOnIdle",
+ delayed_tasks_.Size());
}
IncomingTaskQueue::TriageQueue::TriageQueue(IncomingTaskQueue* outer)
@@ -142,10 +104,6 @@ PendingTask IncomingTaskQueue::TriageQueue::Pop() {
DCHECK(!queue_.empty());
PendingTask pending_task = std::move(queue_.front());
queue_.pop();
-
- if (pending_task.is_high_res)
- --outer_->pending_high_res_tasks_;
-
return pending_task;
}
@@ -157,18 +115,31 @@ bool IncomingTaskQueue::TriageQueue::HasTasks() {
void IncomingTaskQueue::TriageQueue::Clear() {
DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
- // Previously, MessageLoop would delete all tasks including delayed and
- // deferred tasks in a single round before attempting to reload from the
- // incoming queue to see if more tasks remained. This gave it a chance to
- // assess whether or not clearing should continue. As a result, while
- // reloading is automatic for getting and seeing if tasks exist, it is not
- // automatic for Clear().
- while (!queue_.empty()) {
- PendingTask pending_task = std::move(queue_.front());
- queue_.pop();
-
- if (pending_task.is_high_res)
- --outer_->pending_high_res_tasks_;
+
+ // Clear() should be invoked before WillDestroyCurrentMessageLoop().
+ DCHECK(outer_->accept_new_tasks_);
+
+ // Delete all currently pending tasks but not tasks potentially posted from
+ // their destructors. See ~MessageLoop() for the full logic mitigating against
+ // infinite loops when clearing pending tasks. The ScopedClosureRunner below
+ // will be bound to a task posted at the end of the queue. After it is posted,
+ // tasks will be deleted one by one; when the bound ScopedClosureRunner is
+ // deleted and sets |deleted_all_originally_pending|, we know we've deleted
+ // all originally pending tasks.
+ bool deleted_all_originally_pending = false;
+ ScopedClosureRunner capture_deleted_all_originally_pending(BindOnce(
+ [](bool* deleted_all_originally_pending) {
+ *deleted_all_originally_pending = true;
+ },
+ Unretained(&deleted_all_originally_pending)));
+ outer_->AddToIncomingQueue(
+ FROM_HERE,
+ BindOnce([](ScopedClosureRunner) {},
+ std::move(capture_deleted_all_originally_pending)),
+ TimeDelta(), Nestable::kNestable);
+
+ while (!deleted_all_originally_pending) {
+ PendingTask pending_task = Pop();
if (!pending_task.delayed_run_time.is_null()) {
outer_->delayed_tasks().Push(std::move(pending_task));
@@ -179,47 +150,46 @@ void IncomingTaskQueue::TriageQueue::Clear() {
void IncomingTaskQueue::TriageQueue::ReloadFromIncomingQueueIfEmpty() {
DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
if (queue_.empty()) {
- // TODO(robliao): Since these high resolution tasks aren't yet in the
- // delayed queue, they technically shouldn't trigger high resolution timers
- // until they are.
- outer_->pending_high_res_tasks_ += outer_->ReloadWorkQueue(&queue_);
+ outer_->ReloadWorkQueue(&queue_);
}
}
-IncomingTaskQueue::DelayedQueue::DelayedQueue(IncomingTaskQueue* outer)
- : outer_(outer) {}
+IncomingTaskQueue::DelayedQueue::DelayedQueue() {
+ DETACH_FROM_SEQUENCE(sequence_checker_);
+}
IncomingTaskQueue::DelayedQueue::~DelayedQueue() = default;
void IncomingTaskQueue::DelayedQueue::Push(PendingTask pending_task) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (pending_task.is_high_res)
- ++outer_->pending_high_res_tasks_;
+ ++pending_high_res_tasks_;
queue_.push(std::move(pending_task));
}
const PendingTask& IncomingTaskQueue::DelayedQueue::Peek() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!queue_.empty());
return queue_.top();
}
PendingTask IncomingTaskQueue::DelayedQueue::Pop() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!queue_.empty());
PendingTask delayed_task = std::move(const_cast<PendingTask&>(queue_.top()));
queue_.pop();
if (delayed_task.is_high_res)
- --outer_->pending_high_res_tasks_;
+ --pending_high_res_tasks_;
+ DCHECK_GE(pending_high_res_tasks_, 0);
return delayed_task;
}
bool IncomingTaskQueue::DelayedQueue::HasTasks() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// TODO(robliao): The other queues don't check for IsCancelled(). Should they?
while (!queue_.empty() && Peek().task.IsCancelled())
Pop();
@@ -228,52 +198,48 @@ bool IncomingTaskQueue::DelayedQueue::HasTasks() {
}
void IncomingTaskQueue::DelayedQueue::Clear() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
while (!queue_.empty())
Pop();
}
-IncomingTaskQueue::DeferredQueue::DeferredQueue(IncomingTaskQueue* outer)
- : outer_(outer) {}
+size_t IncomingTaskQueue::DelayedQueue::Size() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return queue_.size();
+}
+
+IncomingTaskQueue::DeferredQueue::DeferredQueue() {
+ DETACH_FROM_SEQUENCE(sequence_checker_);
+}
IncomingTaskQueue::DeferredQueue::~DeferredQueue() = default;
void IncomingTaskQueue::DeferredQueue::Push(PendingTask pending_task) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
-
- // TODO(robliao): These tasks should not count towards the high res task count
- // since they are no longer in the delayed queue.
- if (pending_task.is_high_res)
- ++outer_->pending_high_res_tasks_;
-
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
queue_.push(std::move(pending_task));
}
const PendingTask& IncomingTaskQueue::DeferredQueue::Peek() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!queue_.empty());
return queue_.front();
}
PendingTask IncomingTaskQueue::DeferredQueue::Pop() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!queue_.empty());
PendingTask deferred_task = std::move(queue_.front());
queue_.pop();
-
- if (deferred_task.is_high_res)
- --outer_->pending_high_res_tasks_;
-
return deferred_task;
}
bool IncomingTaskQueue::DeferredQueue::HasTasks() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return !queue_.empty();
}
void IncomingTaskQueue::DeferredQueue::Clear() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
while (!queue_.empty())
Pop();
}
@@ -283,35 +249,28 @@ bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
// directly, as it could starve handling of foreign threads. Put every task
// into this queue.
bool accept_new_tasks;
- bool schedule_work = false;
+ bool was_empty = false;
{
AutoLock auto_lock(incoming_queue_lock_);
accept_new_tasks = accept_new_tasks_;
- if (accept_new_tasks)
- schedule_work = PostPendingTaskLockRequired(pending_task);
+ if (accept_new_tasks) {
+ was_empty =
+ PostPendingTaskLockRequired(pending_task) && triage_queue_empty_;
+ }
}
if (!accept_new_tasks) {
// Clear the pending task outside of |incoming_queue_lock_| to prevent any
// chance of self-deadlock if destroying a task also posts a task to this
// queue.
- DCHECK(!schedule_work);
pending_task->task.Reset();
return false;
}
- // Wake up the message loop and schedule work. This is done outside
- // |incoming_queue_lock_| to allow for multiple post tasks to occur while
- // ScheduleWork() is running. For platforms (e.g. Android) that require one
- // call to ScheduleWork() for each task, all pending tasks may serialize
- // within the ScheduleWork() call. As a result, holding a lock to maintain the
- // lifetime of |message_loop_| is less of a concern.
- if (schedule_work) {
- // Ensures |message_loop_| isn't destroyed while running.
- AutoLock auto_lock(message_loop_lock_);
- if (message_loop_)
- message_loop_->ScheduleWork();
- }
+ // Let |task_queue_observer_| know of the queued task. This is done outside
+ // |incoming_queue_lock_| to avoid conflating locks (DidQueueTask() can also
+ // use a lock).
+ task_queue_observer_->DidQueueTask(was_empty);
return true;
}
@@ -319,35 +278,19 @@ bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
bool IncomingTaskQueue::PostPendingTaskLockRequired(PendingTask* pending_task) {
incoming_queue_lock_.AssertAcquired();
-#if defined(OS_WIN)
- if (pending_task->is_high_res)
- ++high_res_task_count_;
-#endif
-
// Initialize the sequence number. The sequence number is used for delayed
// tasks (to facilitate FIFO sorting when two tasks have the same
// delayed_run_time value) and for identifying the task in about:tracing.
pending_task->sequence_num = next_sequence_num_++;
- task_annotator_.DidQueueTask("MessageLoop::PostTask", *pending_task);
+ task_queue_observer_->WillQueueTask(pending_task);
bool was_empty = incoming_queue_.empty();
incoming_queue_.push(std::move(*pending_task));
-
- if (is_ready_for_scheduling_ &&
- (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
- // After we've scheduled the message loop, we do not need to do so again
- // until we know it has processed all of the work in our queue and is
- // waiting for more work again. The message loop will always attempt to
- // reload from the incoming queue before waiting again so we clear this
- // flag in ReloadWorkQueue().
- message_loop_scheduled_ = true;
- return true;
- }
- return false;
+ return was_empty;
}
-int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
+void IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Make sure no tasks are lost.
@@ -355,18 +298,8 @@ int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
// Acquire all we can from the inter-thread queue with one lock acquisition.
AutoLock lock(incoming_queue_lock_);
- if (incoming_queue_.empty()) {
- // If the loop attempts to reload but there are no tasks in the incoming
- // queue, that means it will go to sleep waiting for more work. If the
- // incoming queue becomes nonempty we need to schedule it again.
- message_loop_scheduled_ = false;
- } else {
- incoming_queue_.swap(*work_queue);
- }
- // Reset the count of high resolution tasks since our queue is now empty.
- int high_res_tasks = high_res_task_count_;
- high_res_task_count_ = 0;
- return high_res_tasks;
+ incoming_queue_.swap(*work_queue);
+ triage_queue_empty_ = work_queue->empty();
}
} // namespace internal
diff --git a/chromium/base/message_loop/incoming_task_queue.h b/chromium/base/message_loop/incoming_task_queue.h
index f158d2a93ff..bdcd6d7a754 100644
--- a/chromium/base/message_loop/incoming_task_queue.h
+++ b/chromium/base/message_loop/incoming_task_queue.h
@@ -7,7 +7,6 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/debug/task_annotator.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -17,8 +16,7 @@
namespace base {
-class MessageLoop;
-class PostTaskTest;
+class BasicPostTaskPerfTest;
namespace internal {
@@ -28,6 +26,25 @@ namespace internal {
class BASE_EXPORT IncomingTaskQueue
: public RefCountedThreadSafe<IncomingTaskQueue> {
public:
+ // TODO(gab): Move this to SequencedTaskSource::Observer in
+ // https://chromium-review.googlesource.com/c/chromium/src/+/1088762.
+ class Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Notifies this Observer that it is about to enqueue |task|. The Observer
+ // may alter |task| as a result (e.g. add metadata to the PendingTask
+ // struct). This may be called while holding a lock and shouldn't perform
+ // logic requiring synchronization (override DidQueueTask() for that).
+ virtual void WillQueueTask(PendingTask* task) = 0;
+
+ // Notifies this Observer that a task was queued in the IncomingTaskQueue it
+ // observes. |was_empty| is true if the task source was empty (i.e.
+ // |!HasTasks()|) before this task was posted. DidQueueTask() can be invoked
+ // from any thread.
+ virtual void DidQueueTask(bool was_empty) = 0;
+ };
+
// Provides a read and remove only view into a task queue.
class ReadAndRemoveOnlyQueue {
public:
@@ -63,7 +80,13 @@ class BASE_EXPORT IncomingTaskQueue
DISALLOW_COPY_AND_ASSIGN(Queue);
};
- explicit IncomingTaskQueue(MessageLoop* message_loop);
+ // Constructs an IncomingTaskQueue which will invoke |task_queue_observer|
+ // when tasks are queued. |task_queue_observer| will be bound to this
+ // IncomingTaskQueue's lifetime. Ownership is required as opposed to a raw
+ // pointer since IncomingTaskQueue is ref-counted. For the same reasons,
+ // |task_queue_observer| needs to support being invoked racily during
+ // shutdown.
+ explicit IncomingTaskQueue(std::unique_ptr<Observer> task_queue_observer);
// Appends a task to the incoming queue. Posting of all tasks is routed though
// AddToIncomingQueue() or TryAddToIncomingQueue() to make sure that posting
@@ -77,15 +100,11 @@ class BASE_EXPORT IncomingTaskQueue
TimeDelta delay,
Nestable nestable);
- // Disconnects |this| from the parent message loop.
- void WillDestroyCurrentMessageLoop();
-
- // This should be called when the message loop becomes ready for
- // scheduling work.
- void StartScheduling();
-
- // Runs |pending_task|.
- void RunTask(PendingTask* pending_task);
+ // Instructs this IncomingTaskQueue to stop accepting tasks; this cannot be
+ // undone. Note that the registered IncomingTaskQueue::Observer may still
+ // racily receive a few DidQueueTask() calls while the Shutdown() signal
+ // propagates to other threads and it needs to support that.
+ void Shutdown();
ReadAndRemoveOnlyQueue& triage_tasks() { return triage_tasks_; }
@@ -93,13 +112,17 @@ class BASE_EXPORT IncomingTaskQueue
Queue& deferred_tasks() { return deferred_tasks_; }
- bool HasPendingHighResolutionTasks() {
+ bool HasPendingHighResolutionTasks() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- return pending_high_res_tasks_ > 0;
+ return delayed_tasks_.HasPendingHighResolutionTasks();
}
+ // Reports UMA metrics about its queues before the MessageLoop goes to sleep
+ // per being idle.
+ void ReportMetricsOnIdle() const;
+
private:
- friend class base::PostTaskTest;
+ friend class base::BasicPostTaskPerfTest;
friend class RefCountedThreadSafe<IncomingTaskQueue>;
// These queues below support the previous MessageLoop behavior of
@@ -129,9 +152,9 @@ class BASE_EXPORT IncomingTaskQueue
~TriageQueue() override;
// ReadAndRemoveOnlyQueue:
- // In general, the methods below will attempt to reload from the incoming
- // queue if the queue itself is empty except for Clear(). See Clear() for
- // why it doesn't reload.
+ // The methods below will attempt to reload from the incoming queue if the
+ // queue itself is empty (Clear() has special logic to reload only once
+ // should destructors post more tasks).
const PendingTask& Peek() override;
PendingTask Pop() override;
// Whether this queue has tasks after reloading from the incoming queue.
@@ -149,7 +172,7 @@ class BASE_EXPORT IncomingTaskQueue
class DelayedQueue : public Queue {
public:
- DelayedQueue(IncomingTaskQueue* outer);
+ DelayedQueue();
~DelayedQueue() override;
// Queue:
@@ -160,16 +183,25 @@ class BASE_EXPORT IncomingTaskQueue
void Clear() override;
void Push(PendingTask pending_task) override;
+ size_t Size() const;
+ bool HasPendingHighResolutionTasks() const {
+ return pending_high_res_tasks_ > 0;
+ }
+
private:
- IncomingTaskQueue* const outer_;
DelayedTaskQueue queue_;
+ // Number of high resolution tasks in |queue_|.
+ int pending_high_res_tasks_ = 0;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
DISALLOW_COPY_AND_ASSIGN(DelayedQueue);
};
class DeferredQueue : public Queue {
public:
- DeferredQueue(IncomingTaskQueue* outer);
+ DeferredQueue();
~DeferredQueue() override;
// Queue:
@@ -180,9 +212,10 @@ class BASE_EXPORT IncomingTaskQueue
void Push(PendingTask pending_task) override;
private:
- IncomingTaskQueue* const outer_;
TaskQueue queue_;
+ SEQUENCE_CHECKER(sequence_checker_);
+
DISALLOW_COPY_AND_ASSIGN(DeferredQueue);
};
@@ -194,23 +227,18 @@ class BASE_EXPORT IncomingTaskQueue
// does not retain |pending_task->task| beyond this function call.
bool PostPendingTask(PendingTask* pending_task);
- // Does the real work of posting a pending task. Returns true if the caller
- // should call ScheduleWork() on the message loop.
+ // Does the real work of posting a pending task. Returns true if
+ // |incoming_queue_| was empty before |pending_task| was posted.
bool PostPendingTaskLockRequired(PendingTask* pending_task);
// Loads tasks from the |incoming_queue_| into |*work_queue|. Must be called
- // from the sequence processing the tasks. Returns the number of tasks that
- // require high resolution timers in |work_queue|.
- int ReloadWorkQueue(TaskQueue* work_queue);
+ // from the sequence processing the tasks.
+ void ReloadWorkQueue(TaskQueue* work_queue);
// Checks calls made only on the MessageLoop thread.
SEQUENCE_CHECKER(sequence_checker_);
- debug::TaskAnnotator task_annotator_;
-
- // True if we always need to call ScheduleWork when receiving a new task, even
- // if the incoming queue was not empty.
- const bool always_schedule_work_;
+ const std::unique_ptr<Observer> task_queue_observer_;
// Queue for initial triaging of tasks on the |sequence_checker_| sequence.
TriageQueue triage_tasks_;
@@ -221,23 +249,9 @@ class BASE_EXPORT IncomingTaskQueue
// Queue for non-nestable deferred tasks on the |sequence_checker_| sequence.
DeferredQueue deferred_tasks_;
- // Number of high resolution tasks in the sequence affine queues above.
- int pending_high_res_tasks_ = 0;
-
- // Lock that serializes |message_loop_->ScheduleWork()| calls as well as
- // prevents |message_loop_| from being made nullptr during such a call.
- base::Lock message_loop_lock_;
-
- // Points to the message loop that owns |this|.
- MessageLoop* message_loop_;
-
// Synchronizes access to all members below this line.
base::Lock incoming_queue_lock_;
- // Number of tasks that require high resolution timing. This value is kept
- // so that ReloadWorkQueue() completes in constant time.
- int high_res_task_count_ = 0;
-
// An incoming queue of tasks that are acquired under a mutex for processing
// on this instance's thread. These tasks have not yet been pushed to
// |triage_tasks_|.
@@ -249,12 +263,12 @@ class BASE_EXPORT IncomingTaskQueue
// The next sequence number to use for delayed tasks.
int next_sequence_num_ = 0;
- // True if our message loop has already been scheduled and does not need to be
- // scheduled again until an empty reload occurs.
- bool message_loop_scheduled_ = false;
-
- // False until StartScheduling() is called.
- bool is_ready_for_scheduling_ = false;
+ // True if the outgoing queue (|triage_tasks_|) is empty. Toggled under
+ // |incoming_queue_lock_| in ReloadWorkQueue() so that
+ // PostPendingTaskLockRequired() can tell, without accessing the thread unsafe
+ // |triage_tasks_|, if the IncomingTaskQueue has been made non-empty by a
+ // PostTask() (and needs to inform its Observer).
+ bool triage_queue_empty_ = true;
DISALLOW_COPY_AND_ASSIGN(IncomingTaskQueue);
};
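For orientation, a minimal Observer implementation following the contract documented above (WillQueueTask() kept cheap because it can run under the queue lock, DidQueueTask() only scheduling on the empty-to-non-empty transition). This is a sketch with a hypothetical class name, not part of the patch; the real counterpart is MessageLoop::Controller in message_loop.cc below:

// Illustrative sketch only.
#include "base/debug/task_annotator.h"
#include "base/message_loop/incoming_task_queue.h"
#include "base/message_loop/message_pump.h"
#include "base/pending_task.h"

class WakeOnFirstTaskObserver
    : public base::internal::IncomingTaskQueue::Observer {
 public:
  explicit WakeOnFirstTaskObserver(base::MessagePump* pump) : pump_(pump) {}

  // May be called while the queue's lock is held: no synchronization here.
  void WillQueueTask(base::PendingTask* task) override {
    annotator_.WillQueueTask("MessageLoop::PostTask", task);
  }

  // Called outside the queue lock, possibly from any thread.
  void DidQueueTask(bool was_empty) override {
    if (was_empty)
      pump_->ScheduleWork();  // Only the transition to non-empty needs a wake.
  }

 private:
  base::MessagePump* const pump_;  // Assumed to outlive this observer.
  base::debug::TaskAnnotator annotator_;
};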
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 97ed778d8f1..3723960a5c0 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
+#include "base/debug/task_annotator.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
@@ -36,6 +37,95 @@ std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
} // namespace
+class MessageLoop::Controller : public internal::IncomingTaskQueue::Observer {
+ public:
+ // Constructs a MessageLoopController which controls |message_loop|, notifying
+ // |task_annotator_| when tasks are queued and scheduling work on
+ // |message_loop| as needed. |message_loop| and |task_annotator_| will not be
+ // used after DisconnectFromParent() returns.
+ Controller(MessageLoop* message_loop);
+
+ ~Controller() override;
+
+ // IncomingTaskQueue::Observer:
+ void WillQueueTask(PendingTask* task) final;
+ void DidQueueTask(bool was_empty) final;
+
+ void StartScheduling();
+
+ // Disconnects |message_loop_| from this Controller instance (DidQueueTask()
+ // will no-op from this point forward).
+ void DisconnectFromParent();
+
+ // Shares this Controller's TaskAnnotator with MessageLoop as TaskAnnotator
+ // requires DidQueueTask(x)/RunTask(x) to be invoked on the same TaskAnnotator
+ // instance.
+ debug::TaskAnnotator& task_annotator() { return task_annotator_; }
+
+ private:
+ // A TaskAnnotator which is owned by this Controller to be able to use it
+ // without locking |message_loop_lock_|. It cannot be owned by MessageLoop
+ // because this Controller cannot access |message_loop_| safely without the
+ // lock. Note: the TaskAnnotator API itself is thread-safe.
+ debug::TaskAnnotator task_annotator_;
+
+ // Lock that serializes |message_loop_->ScheduleWork()| and access to all
+ // members below.
+ base::Lock message_loop_lock_;
+
+ // Points to this Controller's outer MessageLoop instance. Null after
+ // DisconnectFromParent().
+ MessageLoop* message_loop_;
+
+ // False until StartScheduling() is called.
+ bool is_ready_for_scheduling_ = false;
+
+ // True if DidQueueTask() has been called before StartScheduling(), letting
+ // StartScheduling() know whether it needs to ScheduleWork() right away or not.
+ bool pending_schedule_work_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(Controller);
+};
+
+MessageLoop::Controller::Controller(MessageLoop* message_loop)
+ : message_loop_(message_loop) {}
+
+MessageLoop::Controller::~Controller() {
+ DCHECK(!message_loop_)
+ << "DisconnectFromParent() needs to be invoked before destruction.";
+}
+
+void MessageLoop::Controller::WillQueueTask(PendingTask* task) {
+ task_annotator_.WillQueueTask("MessageLoop::PostTask", task);
+}
+
+void MessageLoop::Controller::DidQueueTask(bool was_empty) {
+ // Avoid locking if we don't need to schedule.
+ if (!was_empty)
+ return;
+
+ AutoLock auto_lock(message_loop_lock_);
+
+ if (message_loop_ && is_ready_for_scheduling_)
+ message_loop_->ScheduleWork();
+ else
+ pending_schedule_work_ = true;
+}
+
+void MessageLoop::Controller::StartScheduling() {
+ AutoLock lock(message_loop_lock_);
+ DCHECK(message_loop_);
+ DCHECK(!is_ready_for_scheduling_);
+ is_ready_for_scheduling_ = true;
+ if (pending_schedule_work_)
+ message_loop_->ScheduleWork();
+}
+
+void MessageLoop::Controller::DisconnectFromParent() {
+ AutoLock lock(message_loop_lock_);
+ message_loop_ = nullptr;
+}
+
//------------------------------------------------------------------------------
MessageLoop::MessageLoop(Type type)
@@ -94,7 +184,8 @@ MessageLoop::~MessageLoop() {
thread_task_runner_handle_.reset();
// Tell the incoming queue that we are dying.
- incoming_task_queue_->WillDestroyCurrentMessageLoop();
+ message_loop_controller_->DisconnectFromParent();
+ incoming_task_queue_->Shutdown();
incoming_task_queue_ = nullptr;
unbound_task_runner_ = nullptr;
task_runner_ = nullptr;
@@ -192,13 +283,18 @@ std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
return WrapUnique(new MessageLoop(type, std::move(pump_factory)));
}
+// TODO(gab): Avoid bare new + WrapUnique below when introducing
+// SequencedTaskSource in follow-up @
+// https://chromium-review.googlesource.com/c/chromium/src/+/1088762.
MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
: MessageLoopCurrent(this),
type_(type),
pump_factory_(std::move(pump_factory)),
- incoming_task_queue_(new internal::IncomingTaskQueue(this)),
- unbound_task_runner_(
- new internal::MessageLoopTaskRunner(incoming_task_queue_)),
+ message_loop_controller_(new Controller(this)),
+ incoming_task_queue_(MakeRefCounted<internal::IncomingTaskQueue>(
+ WrapUnique(message_loop_controller_))),
+ unbound_task_runner_(MakeRefCounted<internal::MessageLoopTaskRunner>(
+ incoming_task_queue_)),
task_runner_(unbound_task_runner_) {
// If type is TYPE_CUSTOM non-null pump_factory must be given.
DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
@@ -220,7 +316,7 @@ void MessageLoop::BindToCurrentThread() {
<< "should only have one message loop per thread";
MessageLoopCurrent::BindToCurrentThreadInternal(this);
- incoming_task_queue_->StartScheduling();
+ message_loop_controller_->StartScheduling();
unbound_task_runner_->BindToCurrentThread();
unbound_task_runner_ = nullptr;
SetThreadTaskRunnerHandle();
@@ -231,6 +327,12 @@ void MessageLoop::BindToCurrentThread() {
&sequence_local_storage_map_);
RunLoop::RegisterDelegateForCurrentThread(this);
+
+#if defined(OS_ANDROID)
+ // On Android, attach to the native loop when there is one.
+ if (type_ == TYPE_UI || type_ == TYPE_JAVA)
+ static_cast<MessagePumpForUI*>(pump_.get())->Attach(this);
+#endif
}
std::string MessageLoop::GetThreadName() const {
@@ -316,7 +418,8 @@ void MessageLoop::RunTask(PendingTask* pending_task) {
for (auto& observer : task_observers_)
observer.WillProcessTask(*pending_task);
- incoming_task_queue_->RunTask(pending_task);
+ message_loop_controller_->task_annotator().RunTask("MessageLoop::PostTask",
+ pending_task);
for (auto& observer : task_observers_)
observer.DidProcessTask(*pending_task);
@@ -351,6 +454,10 @@ void MessageLoop::ScheduleWork() {
pump_->ScheduleWork();
}
+TimeTicks MessageLoop::CapAtOneDay(TimeTicks next_run_time) {
+ return std::min(next_run_time, recent_time_ + TimeDelta::FromDays(1));
+}
+
bool MessageLoop::DoWork() {
if (!task_execution_allowed_)
return false;
@@ -382,7 +489,7 @@ bool MessageLoop::DoWork() {
bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
if (!task_execution_allowed_ ||
!incoming_task_queue_->delayed_tasks().HasTasks()) {
- recent_time_ = *next_delayed_work_time = TimeTicks();
+ *next_delayed_work_time = TimeTicks();
return false;
}
@@ -395,10 +502,11 @@ bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
TimeTicks next_run_time =
incoming_task_queue_->delayed_tasks().Peek().delayed_run_time;
+
if (next_run_time > recent_time_) {
recent_time_ = TimeTicks::Now(); // Get a better view of Now();
if (next_run_time > recent_time_) {
- *next_delayed_work_time = next_run_time;
+ *next_delayed_work_time = CapAtOneDay(next_run_time);
return false;
}
}
@@ -406,8 +514,8 @@ bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
PendingTask pending_task = incoming_task_queue_->delayed_tasks().Pop();
if (incoming_task_queue_->delayed_tasks().HasTasks()) {
- *next_delayed_work_time =
- incoming_task_queue_->delayed_tasks().Peek().delayed_run_time;
+ *next_delayed_work_time = CapAtOneDay(
+ incoming_task_queue_->delayed_tasks().Peek().delayed_run_time);
}
return DeferOrRunPendingTask(std::move(pending_task));
@@ -417,21 +525,42 @@ bool MessageLoop::DoIdleWork() {
if (ProcessNextDelayedNonNestableTask())
return true;
- if (ShouldQuitWhenIdle())
+#if defined(OS_WIN)
+ bool need_high_res_timers = false;
+#endif
+
+ // Do not report idle metrics if about to quit the loop and/or in a nested
+ // loop where |!task_execution_allowed_|. In the former case, the loop isn't
+ // going to sleep and in the latter case DoDelayedWork() will not actually do
+ // the work this is prepping for.
+ if (ShouldQuitWhenIdle()) {
pump_->Quit();
+ } else if (task_execution_allowed_) {
+ // Only track idle metrics in MessageLoopForUI to avoid too much contention
+ // logging the histogram (https://crbug.com/860801) -- there's typically
+ // only one UI thread per process and, for practical purposes, restricting
+ // the MessageLoop diagnostic metrics to it yields similar information.
+ if (type_ == TYPE_UI)
+ incoming_task_queue_->ReportMetricsOnIdle();
+
+#if defined(OS_WIN)
+ // On Windows we activate the high resolution timer so that the wait
+ // _if_ triggered by the timer happens with good resolution. If we don't
+ // do this the default resolution is 15ms which might not be acceptable
+ // for some tasks.
+ need_high_res_timers =
+ incoming_task_queue_->HasPendingHighResolutionTasks();
+#endif
+ }
- // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
- // On Windows we activate the high resolution timer so that the wait
- // _if_ triggered by the timer happens with good resolution. If we don't
- // do this the default resolution is 15ms which might not be acceptable
- // for some tasks.
- bool high_res = incoming_task_queue_->HasPendingHighResolutionTasks();
- if (high_res != in_high_res_mode_) {
- in_high_res_mode_ = high_res;
+ if (in_high_res_mode_ != need_high_res_timers) {
+ in_high_res_mode_ = need_high_res_timers;
Time::ActivateHighResolutionTimer(in_high_res_mode_);
}
#endif
+
+ // When we return we will do a kernel wait for more tasks.
return false;
}
@@ -440,8 +569,13 @@ bool MessageLoop::DoIdleWork() {
//------------------------------------------------------------------------------
// MessageLoopForUI
-MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
- : MessageLoop(TYPE_UI, BindOnce(&ReturnPump, std::move(pump))) {}
+MessageLoopForUI::MessageLoopForUI(Type type) : MessageLoop(type) {
+#if defined(OS_ANDROID)
+ DCHECK(type == TYPE_UI || type == TYPE_JAVA);
+#else
+ DCHECK_EQ(type, TYPE_UI);
+#endif
+}
// static
MessageLoopCurrentForUI MessageLoopForUI::current() {
@@ -460,14 +594,18 @@ void MessageLoopForUI::Attach() {
#endif // defined(OS_IOS)
#if defined(OS_ANDROID)
-void MessageLoopForUI::Start() {
- // No Histogram support for UI message loop as it is managed by Java side
- static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
-}
-
void MessageLoopForUI::Abort() {
static_cast<MessagePumpForUI*>(pump_.get())->Abort();
}
+
+bool MessageLoopForUI::IsAborted() {
+ return static_cast<MessagePumpForUI*>(pump_.get())->IsAborted();
+}
+
+void MessageLoopForUI::QuitWhenIdle(base::OnceClosure callback) {
+ static_cast<MessagePumpForUI*>(pump_.get())
+ ->QuitWhenIdle(std::move(callback));
+}
#endif // defined(OS_ANDROID)
#if defined(OS_WIN)
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index bd191fd9779..7c31a128b53 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -222,6 +222,8 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
friend class Thread;
FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
+ class Controller;
+
// Creates a MessageLoop without binding to a thread.
// If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
// to create a message pump for this message loop. Otherwise a default
@@ -261,6 +263,14 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
// responsible for synchronizing ScheduleWork() calls.
void ScheduleWork();
+ // Returns |next_run_time| capped at 1 day from |recent_time_|. This is used
+ // to mitigate https://crbug.com/850450 where some platforms are unhappy with
+ // delays > 100,000,000 seconds. In practice, a diagnosis metric showed that
+ // no sleep > 1 hour ever completes (always interrupted by an earlier
+ // MessageLoop event) and 99% of completed sleeps are the ones scheduled for
+ // <= 1 second. Details @ https://crrev.com/c/1142589.
+ TimeTicks CapAtOneDay(TimeTicks next_run_time);
+
// MessagePump::Delegate methods:
bool DoWork() override;
bool DoDelayedWork(TimeTicks* next_delayed_work_time) override;
@@ -295,6 +305,9 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
ObserverList<TaskObserver> task_observers_;
+ // Pointer to this MessageLoop's Controller, valid until the reference to
+ // |incoming_task_queue_| is dropped below.
+ Controller* const message_loop_controller_;
scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
// A task runner which we haven't bound to a thread yet.
@@ -337,10 +350,7 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
//
class BASE_EXPORT MessageLoopForUI : public MessageLoop {
public:
- MessageLoopForUI() : MessageLoop(TYPE_UI) {
- }
-
- explicit MessageLoopForUI(std::unique_ptr<MessagePump> pump);
+ explicit MessageLoopForUI(Type type = TYPE_UI);
// TODO(gab): Mass migrate callers to MessageLoopCurrentForUI::Get()/IsSet().
static MessageLoopCurrentForUI current();
@@ -354,14 +364,18 @@ class BASE_EXPORT MessageLoopForUI : public MessageLoop {
#endif
#if defined(OS_ANDROID)
- // On Android, the UI message loop is handled by Java side. So Run() should
- // never be called. Instead use Start(), which will forward all the native UI
- // events to the Java message loop.
- void Start();
-
- // In Android there are cases where we want to abort immediately without
+ // On Android there are cases where we want to abort immediately without
// calling Quit(), in these cases we call Abort().
void Abort();
+
+ // True if this message pump has been aborted.
+ bool IsAborted();
+
+ // Since Run() is never called on Android, and the message loop is run by the
+ // java Looper, quitting the RunLoop won't join the thread, so we need a
+ // callback to run when the RunLoop goes idle to let the Java thread know when
+ // it can safely quit.
+ void QuitWhenIdle(base::OnceClosure callback);
#endif
#if defined(OS_WIN)
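To make the capping behaviour described above concrete, a small arithmetic sketch (illustrative only, not part of the patch):

// Illustrative sketch: what CapAtOneDay() does for a far-future delayed task.
#include <algorithm>
#include "base/time/time.h"

void CapExample() {
  base::TimeTicks recent = base::TimeTicks::Now();
  base::TimeTicks next_run = recent + base::TimeDelta::FromDays(30);
  base::TimeTicks capped =
      std::min(next_run, recent + base::TimeDelta::FromDays(1));
  // |capped| == recent + 1 day. The pump wakes up then, finds the task still
  // isn't due, and re-arms another capped wait; the task still runs at
  // |next_run|.
}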
diff --git a/chromium/base/message_loop/message_loop_current.cc b/chromium/base/message_loop/message_loop_current.cc
index 0beef5ae0fc..4959b70e067 100644
--- a/chromium/base/message_loop/message_loop_current.cc
+++ b/chromium/base/message_loop/message_loop_current.cc
@@ -168,10 +168,6 @@ void MessageLoopCurrentForUI::Attach() {
#endif // defined(OS_IOS)
#if defined(OS_ANDROID)
-void MessageLoopCurrentForUI::Start() {
- static_cast<MessageLoopForUI*>(current_)->Start();
-}
-
void MessageLoopCurrentForUI::Abort() {
static_cast<MessageLoopForUI*>(current_)->Abort();
}
@@ -201,11 +197,11 @@ bool MessageLoopCurrentForIO::IsSet() {
#if !defined(OS_NACL_SFI)
#if defined(OS_WIN)
-void MessageLoopCurrentForIO::RegisterIOHandler(
+HRESULT MessageLoopCurrentForIO::RegisterIOHandler(
HANDLE file,
MessagePumpForIO::IOHandler* handler) {
DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
- pump_->RegisterIOHandler(file, handler);
+ return pump_->RegisterIOHandler(file, handler);
}
bool MessageLoopCurrentForIO::RegisterJobObject(
diff --git a/chromium/base/message_loop/message_loop_current.h b/chromium/base/message_loop/message_loop_current.h
index c5016dcf20a..61d1607e31e 100644
--- a/chromium/base/message_loop/message_loop_current.h
+++ b/chromium/base/message_loop/message_loop_current.h
@@ -225,12 +225,6 @@ class BASE_EXPORT MessageLoopCurrentForUI : public MessageLoopCurrent {
#endif
#if defined(OS_ANDROID)
- // Forwards to MessageLoopForUI::Start().
- // TODO(https://crbug.com/825327): Plumb the actual MessageLoopForUI* to
- // callers and remove ability to access this method from
- // MessageLoopCurrentForUI.
- void Start();
-
// Forwards to MessageLoopForUI::Abort().
// TODO(https://crbug.com/825327): Plumb the actual MessageLoopForUI* to
// callers and remove ability to access this method from
@@ -265,7 +259,7 @@ class BASE_EXPORT MessageLoopCurrentForIO : public MessageLoopCurrent {
#if defined(OS_WIN)
// Please see MessagePumpWin for definitions of these methods.
- void RegisterIOHandler(HANDLE file, MessagePumpForIO::IOHandler* handler);
+ HRESULT RegisterIOHandler(HANDLE file, MessagePumpForIO::IOHandler* handler);
bool RegisterJobObject(HANDLE job, MessagePumpForIO::IOHandler* handler);
bool WaitForIOCompletion(DWORD timeout, MessagePumpForIO::IOHandler* filter);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
diff --git a/chromium/base/message_loop/message_loop_task_runner_perftest.cc b/chromium/base/message_loop/message_loop_task_runner_perftest.cc
new file mode 100644
index 00000000000..3ab9ba2dcfd
--- /dev/null
+++ b/chromium/base/message_loop/message_loop_task_runner_perftest.cc
@@ -0,0 +1,191 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_task_runner.h"
+
+#include <string>
+#include <utility>
+
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/message_loop/incoming_task_queue.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_task_runner.h"
+#include "base/message_loop/message_pump.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+
+namespace {
+
+// Tests below will post tasks in a loop until |kPostTaskPerfTestDuration| has
+// elapsed.
+constexpr TimeDelta kPostTaskPerfTestDuration =
+ base::TimeDelta::FromSeconds(30);
+
+} // namespace
+
+class FakeObserver : public internal::IncomingTaskQueue::Observer {
+ public:
+ // IncomingTaskQueue::Observer
+ void WillQueueTask(PendingTask* task) override {}
+ void DidQueueTask(bool was_empty) override {}
+
+ virtual void RunTask(PendingTask* task) { std::move(task->task).Run(); }
+};
+
+// Exercises MessageLoopTaskRunner's multi-threaded queue in isolation.
+class BasicPostTaskPerfTest : public testing::Test {
+ public:
+ void Run(int batch_size,
+ int tasks_per_reload,
+ std::unique_ptr<FakeObserver> task_source_observer) {
+ base::TimeTicks start = base::TimeTicks::Now();
+ base::TimeTicks now;
+ FakeObserver* task_source_observer_raw = task_source_observer.get();
+ scoped_refptr<internal::IncomingTaskQueue> queue(
+ base::MakeRefCounted<internal::IncomingTaskQueue>(
+ std::move(task_source_observer)));
+ scoped_refptr<SingleThreadTaskRunner> task_runner(
+ base::MakeRefCounted<internal::MessageLoopTaskRunner>(queue));
+ uint32_t num_posted = 0;
+ do {
+ for (int i = 0; i < batch_size; ++i) {
+ for (int j = 0; j < tasks_per_reload; ++j) {
+ task_runner->PostTask(FROM_HERE, DoNothing());
+ num_posted++;
+ }
+ TaskQueue loop_local_queue;
+ queue->ReloadWorkQueue(&loop_local_queue);
+ while (!loop_local_queue.empty()) {
+ PendingTask t = std::move(loop_local_queue.front());
+ loop_local_queue.pop();
+ task_source_observer_raw->RunTask(&t);
+ }
+ }
+
+ now = base::TimeTicks::Now();
+ } while (now - start < kPostTaskPerfTestDuration);
+ std::string trace = StringPrintf("%d_tasks_per_reload", tasks_per_reload);
+ perf_test::PrintResult(
+ "task", "", trace,
+ (now - start).InMicroseconds() / static_cast<double>(num_posted),
+ "us/task", true);
+ }
+};
+
+TEST_F(BasicPostTaskPerfTest, OneTaskPerReload) {
+ Run(10000, 1, std::make_unique<FakeObserver>());
+}
+
+TEST_F(BasicPostTaskPerfTest, TenTasksPerReload) {
+ Run(10000, 10, std::make_unique<FakeObserver>());
+}
+
+TEST_F(BasicPostTaskPerfTest, OneHundredTasksPerReload) {
+ Run(1000, 100, std::make_unique<FakeObserver>());
+}
+
+class StubMessagePump : public MessagePump {
+ public:
+ StubMessagePump() = default;
+ ~StubMessagePump() override = default;
+
+ // MessagePump:
+ void Run(Delegate* delegate) override {}
+ void Quit() override {}
+ void ScheduleWork() override {}
+ void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override {}
+};
+
+// Simulates the overhead of hooking TaskAnnotator and ScheduleWork() to the
+// post task machinery.
+class FakeObserverSimulatingOverhead : public FakeObserver {
+ public:
+ FakeObserverSimulatingOverhead() = default;
+
+ // FakeObserver:
+ void WillQueueTask(PendingTask* task) final {
+ task_annotator_.WillQueueTask("MessageLoop::PostTask", task);
+ }
+
+ void DidQueueTask(bool was_empty) final {
+ AutoLock scoped_lock(message_loop_lock_);
+ pump_->ScheduleWork();
+ }
+
+ void RunTask(PendingTask* task) final {
+ task_annotator_.RunTask("MessageLoop::PostTask", task);
+ }
+
+ private:
+ // Simulates overhead from ScheduleWork() and TaskAnnotator calls involved in
+ // a real PostTask (stores the StubMessagePump in a pointer to force a virtual
+ // dispatch for ScheduleWork() and be closer to reality).
+ Lock message_loop_lock_;
+ std::unique_ptr<MessagePump> pump_{std::make_unique<StubMessagePump>()};
+ debug::TaskAnnotator task_annotator_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeObserverSimulatingOverhead);
+};
+
+TEST_F(BasicPostTaskPerfTest, OneTaskPerReloadWithOverhead) {
+ Run(10000, 1, std::make_unique<FakeObserverSimulatingOverhead>());
+}
+
+TEST_F(BasicPostTaskPerfTest, TenTasksPerReloadWithOverhead) {
+ Run(10000, 10, std::make_unique<FakeObserverSimulatingOverhead>());
+}
+
+TEST_F(BasicPostTaskPerfTest, OneHundredTasksPerReloadWithOverhead) {
+ Run(1000, 100, std::make_unique<FakeObserverSimulatingOverhead>());
+}
+
+// Exercises the full MessageLoop/RunLoop machinery.
+class IntegratedPostTaskPerfTest : public testing::Test {
+ public:
+ void Run(int batch_size, int tasks_per_reload) {
+ base::TimeTicks start = base::TimeTicks::Now();
+ base::TimeTicks now;
+ MessageLoop loop;
+ uint32_t num_posted = 0;
+ do {
+ for (int i = 0; i < batch_size; ++i) {
+ for (int j = 0; j < tasks_per_reload; ++j) {
+ loop.task_runner()->PostTask(FROM_HERE, DoNothing());
+ num_posted++;
+ }
+ RunLoop().RunUntilIdle();
+ }
+
+ now = base::TimeTicks::Now();
+ } while (now - start < kPostTaskPerfTestDuration);
+ std::string trace = StringPrintf("%d_tasks_per_reload", tasks_per_reload);
+ perf_test::PrintResult(
+ "task", "", trace,
+ (now - start).InMicroseconds() / static_cast<double>(num_posted),
+ "us/task", true);
+ }
+};
+
+TEST_F(IntegratedPostTaskPerfTest, OneTaskPerReload) {
+ Run(10000, 1);
+}
+
+TEST_F(IntegratedPostTaskPerfTest, TenTasksPerReload) {
+ Run(10000, 10);
+}
+
+TEST_F(IntegratedPostTaskPerfTest, OneHundredTasksPerReload) {
+ Run(1000, 100);
+}
+
+} // namespace base
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index 1c0911319dd..202c1d6215a 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -23,6 +23,8 @@
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/task_scheduler.h"
+#include "base/test/gtest_util.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/test/test_simple_task_runner.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
@@ -1626,6 +1628,8 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
BindOnce(&FuncThatQuitsNow));
+ run_loop.allow_quit_current_deprecated_ = true;
+
RunLoop outer_run_loop;
outer_run_loop.Run();
@@ -1650,7 +1654,13 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
// On Linux, the pipe buffer size is 64KiB by default. The bug caused one
// byte accumulated in the pipe per two posts, so we should repeat 128K
// times to reproduce the bug.
-TEST_P(MessageLoopTypedTest, RecursivePosts) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/810077): This is flaky on Fuchsia.
+#define MAYBE_RecursivePosts DISABLED_RecursivePosts
+#else
+#define MAYBE_RecursivePosts RecursivePosts
+#endif
+TEST_P(MessageLoopTypedTest, MAYBE_RecursivePosts) {
const int kNumTimes = 1 << 17;
MessageLoop loop(GetMessageLoopType());
loop.task_runner()->PostTask(FROM_HERE,
@@ -1749,6 +1759,33 @@ TEST_P(MessageLoopTypedTest, NestableTasksAllowedManually) {
run_loop.Run();
}
+#if defined(OS_MACOSX)
+// This metric is a bit broken on Mac OS because CFRunLoop doesn't
+// deterministically invoke MessageLoop::DoIdleWork(). Since this is a temporary
+// diagnostic metric, we let that slide and simply don't test it on Mac.
+#define MAYBE_MetricsOnlyFromUILoops DISABLED_MetricsOnlyFromUILoops
+#else
+#define MAYBE_MetricsOnlyFromUILoops MetricsOnlyFromUILoops
+#endif
+
+TEST_P(MessageLoopTypedTest, MAYBE_MetricsOnlyFromUILoops) {
+ MessageLoop loop(GetMessageLoopType());
+
+ const bool histograms_expected = GetMessageLoopType() == MessageLoop::TYPE_UI;
+
+ HistogramTester histogram_tester;
+
+ // Loop that goes idle with one pending task.
+ RunLoop run_loop;
+ loop.task_runner()->PostDelayedTask(FROM_HERE, run_loop.QuitClosure(),
+ TimeDelta::FromMilliseconds(1));
+ run_loop.Run();
+
+ histogram_tester.ExpectTotalCount(
+ "MessageLoop.DelayedTaskQueueForUI.PendingTasksCountOnIdle",
+ histograms_expected ? 1 : 0);
+}
+
INSTANTIATE_TEST_CASE_P(
,
MessageLoopTypedTest,
@@ -2205,4 +2242,49 @@ INSTANTIATE_TEST_CASE_P(
TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
MessageLoopTest::ParamInfoToString);
+namespace {
+
+class PostTaskOnDestroy {
+ public:
+ PostTaskOnDestroy(int times) : times_remaining_(times) {}
+ ~PostTaskOnDestroy() { PostTaskWithPostingDestructor(times_remaining_); }
+
+ // Post a task that will repost itself on destruction |times| times.
+ static void PostTaskWithPostingDestructor(int times) {
+ if (times > 0) {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce([](std::unique_ptr<PostTaskOnDestroy>) {},
+ std::make_unique<PostTaskOnDestroy>(times - 1)));
+ }
+ }
+
+ private:
+ const int times_remaining_;
+
+ DISALLOW_COPY_AND_ASSIGN(PostTaskOnDestroy);
+};
+
+} // namespace
+
+// Test that MessageLoop destruction handles a task's destructor posting another
+// task by:
+// 1) Not getting stuck clearing its task queue.
+// 2) DCHECKing when clearing pending tasks many times still doesn't yield an
+// empty queue.
+TEST(MessageLoopDestructionTest, ExpectDeathWithStubbornPostTaskOnDestroy) {
+ std::unique_ptr<MessageLoop> loop = std::make_unique<MessageLoop>();
+
+ EXPECT_DCHECK_DEATH({
+ PostTaskOnDestroy::PostTaskWithPostingDestructor(1000);
+ loop.reset();
+ });
+}
+
+TEST(MessageLoopDestructionTest, DestroysFineWithReasonablePostTaskOnDestroy) {
+ std::unique_ptr<MessageLoop> loop = std::make_unique<MessageLoop>();
+
+ PostTaskOnDestroy::PostTaskWithPostingDestructor(10);
+ loop.reset();
+}
+
} // namespace base
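The two MessageLoopDestructionTest cases above rely on the loop draining its queue only a bounded number of times during destruction. A standalone sketch of that idea, with a hypothetical pass limit and std::function standing in for PendingTask (this is not the real MessageLoop code):

  #include <cassert>
  #include <functional>
  #include <queue>

  using TaskQueue = std::queue<std::function<void()>>;

  void DrainWithLimit(TaskQueue& tasks) {
    const int kMaxPasses = 100;  // Hypothetical limit; the real constant differs.
    for (int pass = 0; pass < kMaxPasses && !tasks.empty(); ++pass) {
      TaskQueue batch;
      batch.swap(tasks);
      // Popping destroys each task; a task's captured state may push new work
      // onto |tasks| from its destructor, which the next pass then handles.
      while (!batch.empty())
        batch.pop();
    }
    assert(tasks.empty() && "a task kept re-posting work on destruction");
  }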
diff --git a/chromium/base/message_loop/message_pump_android.cc b/chromium/base/message_loop/message_pump_android.cc
index 8c5bb5727ab..3fd5567e6b2 100644
--- a/chromium/base/message_loop/message_pump_android.cc
+++ b/chromium/base/message_loop/message_pump_android.cc
@@ -4,162 +4,310 @@
#include "base/message_loop/message_pump_android.h"
+#include <android/looper.h>
+#include <errno.h>
+#include <fcntl.h>
#include <jni.h>
+#include <sys/eventfd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <utility>
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
+#include "base/callback_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/run_loop.h"
-#include "jni/SystemMessageHandler_jni.h"
+
+// Android stripped sys/timerfd.h out of their platform headers, so we have to
+// use syscall to make use of timerfd. Once the min API level is 20, we can
+// directly use timerfd.h.
+#ifndef __NR_timerfd_create
+#error "Unable to find syscall for __NR_timerfd_create"
+#endif
+
+#ifndef TFD_TIMER_ABSTIME
+#define TFD_TIMER_ABSTIME (1 << 0)
+#endif
using base::android::JavaParamRef;
using base::android::ScopedJavaLocalRef;
namespace base {
-MessagePumpForUI::MessagePumpForUI() = default;
-MessagePumpForUI::~MessagePumpForUI() = default;
+namespace {
-// This is called by the java SystemMessageHandler whenever the message queue
-// detects an idle state (as in, control returns to the looper and there are no
-// tasks available to be run immediately).
-// See the comments in DoRunLoopOnce for how this differs from the
-// implementation on other platforms.
-void MessagePumpForUI::DoIdleWork(JNIEnv* env,
- const JavaParamRef<jobject>& obj) {
- delegate_->DoIdleWork();
+// See sys/timerfd.h
+int timerfd_create(int clockid, int flags) {
+ return syscall(__NR_timerfd_create, clockid, flags);
}
-void MessagePumpForUI::DoRunLoopOnce(JNIEnv* env,
- const JavaParamRef<jobject>& obj,
- jboolean delayed) {
- if (delayed)
- delayed_scheduled_time_ = base::TimeTicks();
+// See sys/timerfd.h
+int timerfd_settime(int ufc,
+ int flags,
+ const struct itimerspec* utmr,
+ struct itimerspec* otmr) {
+ return syscall(__NR_timerfd_settime, ufc, flags, utmr, otmr);
+}
- // If the pump has been aborted, tasks may continue to be queued up, but
- // shouldn't run.
- if (ShouldAbort())
- return;
+int NonDelayedLooperCallback(int fd, int events, void* data) {
+ if (events & ALOOPER_EVENT_HANGUP)
+ return 0;
+
+ DCHECK(events & ALOOPER_EVENT_INPUT);
+ MessagePumpForUI* pump = reinterpret_cast<MessagePumpForUI*>(data);
+ pump->OnNonDelayedLooperCallback();
+ return 1; // continue listening for events
+}
+
+int DelayedLooperCallback(int fd, int events, void* data) {
+ if (events & ALOOPER_EVENT_HANGUP)
+ return 0;
+
+ DCHECK(events & ALOOPER_EVENT_INPUT);
+ MessagePumpForUI* pump = reinterpret_cast<MessagePumpForUI*>(data);
+ pump->OnDelayedLooperCallback();
+ return 1; // continue listening for events
+}
+
+} // namespace
+
+MessagePumpForUI::MessagePumpForUI() {
+ // The Android native ALooper uses epoll to poll our file descriptors and wake
+ // us up. We use a simple level-triggered eventfd to signal that non-delayed
+ // work is available, and a timerfd to signal when delayed work is ready to
+ // be run.
+ non_delayed_fd_ = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ CHECK_NE(non_delayed_fd_, -1);
+ DCHECK_EQ(TimeTicks::GetClock(), TimeTicks::Clock::LINUX_CLOCK_MONOTONIC);
+
+ // We can't create the timerfd with TFD_NONBLOCK | TFD_CLOEXEC as we can't
+ // include timerfd.h. See comments above on __NR_timerfd_create. It looks like
+  // they're just aliases to O_NONBLOCK and O_CLOEXEC anyway, so this should be
+ // fine.
+ delayed_fd_ = timerfd_create(CLOCK_MONOTONIC, O_NONBLOCK | O_CLOEXEC);
+ CHECK_NE(delayed_fd_, -1);
+
+ looper_ = ALooper_prepare(0);
+ DCHECK(looper_);
+ // Add a reference to the looper so it isn't deleted on us.
+ ALooper_acquire(looper_);
+ ALooper_addFd(looper_, non_delayed_fd_, 0, ALOOPER_EVENT_INPUT,
+ &NonDelayedLooperCallback, reinterpret_cast<void*>(this));
+ ALooper_addFd(looper_, delayed_fd_, 0, ALOOPER_EVENT_INPUT,
+ &DelayedLooperCallback, reinterpret_cast<void*>(this));
+}
+
+MessagePumpForUI::~MessagePumpForUI() {
+ DCHECK_EQ(ALooper_forThread(), looper_);
+ ALooper_removeFd(looper_, non_delayed_fd_);
+ ALooper_removeFd(looper_, delayed_fd_);
+ ALooper_release(looper_);
+ looper_ = nullptr;
- // This is based on MessagePumpForUI::DoRunLoop() from desktop.
- // Note however that our system queue is handled in the java side.
- // In desktop we inspect and process a single system message and then
- // we call DoWork() / DoDelayedWork(). This is then wrapped in a for loop and
- // repeated until no work is left to do, at which point DoIdleWork is called.
- // On Android, the java message queue may contain messages for other handlers
- // that will be processed before calling here again.
- // This means that unlike Desktop, we can't wrap a for loop around this
- // function and keep processing tasks until we have no work left to do - we
- // have to return control back to the Android Looper after each message. This
- // also means we have to perform idle detection differently, which is why we
- // add an IdleHandler to the message queue in SystemMessageHandler.java, which
- // calls DoIdleWork whenever control returns back to the looper and there are
- // no tasks queued up to run immediately.
- delegate_->DoWork();
- if (ShouldAbort()) {
- // There is a pending JNI exception, return to Java so that the exception is
- // thrown correctly.
+ close(non_delayed_fd_);
+ close(delayed_fd_);
+}
+
+void MessagePumpForUI::OnDelayedLooperCallback() {
+ if (ShouldQuit())
return;
- }
+
+ // Clear the fd.
+ uint64_t value;
+ int ret = read(delayed_fd_, &value, sizeof(value));
+ DCHECK_GE(ret, 0);
+ delayed_scheduled_time_ = base::TimeTicks();
base::TimeTicks next_delayed_work_time;
delegate_->DoDelayedWork(&next_delayed_work_time);
- if (ShouldAbort()) {
- // There is a pending JNI exception, return to Java so that the exception is
- // thrown correctly
+ if (!next_delayed_work_time.is_null()) {
+ ScheduleDelayedWork(next_delayed_work_time);
+ }
+ if (ShouldQuit())
return;
+ // We may be idle now, so pump the loop to find out.
+ ScheduleWork();
+}
+
+void MessagePumpForUI::OnNonDelayedLooperCallback() {
+ base::TimeTicks next_delayed_work_time;
+ bool did_any_work = false;
+
+ // Runs all native tasks scheduled to run, scheduling delayed work if
+ // necessary.
+ while (true) {
+ bool did_work_this_loop = false;
+ if (ShouldQuit())
+ return;
+ did_work_this_loop = delegate_->DoWork();
+ if (ShouldQuit())
+ return;
+
+ did_work_this_loop |= delegate_->DoDelayedWork(&next_delayed_work_time);
+
+ did_any_work |= did_work_this_loop;
+
+ // If we didn't do any work, we're out of native tasks to run, and we should
+ // return control to the looper to run Java tasks.
+ if (!did_work_this_loop)
+ break;
}
+ // If we did any work, return control to the looper to run java tasks before
+ // we call DoIdleWork(). We haven't cleared the fd yet, so we'll get woken up
+ // again soon to check for idle-ness.
+ if (did_any_work)
+ return;
+ if (ShouldQuit())
+ return;
- if (!next_delayed_work_time.is_null())
- ScheduleDelayedWork(next_delayed_work_time);
+ // Read the file descriptor, resetting its contents to 0 and reading back the
+ // stored value.
+ // See http://man7.org/linux/man-pages/man2/eventfd.2.html
+ uint64_t value = 0;
+ int ret = read(non_delayed_fd_, &value, sizeof(value));
+ DCHECK_GE(ret, 0);
+
+ // If we read a value > 1, it means we lost the race to clear the fd before a
+ // new task was posted. This is okay, we can just re-schedule work.
+ if (value > 1) {
+ ScheduleWork();
+ } else {
+ // At this point, the java looper might not be idle - it's impossible to
+ // know pre-Android-M, so we may end up doing Idle work while java tasks are
+ // still queued up. Note that this won't cause us to fail to run java tasks
+ // using QuitWhenIdle, as the JavaHandlerThread will finish running all
+ // currently scheduled tasks before it quits. Also note that we can't just
+ // add an idle callback to the java looper, as that will fire even if native
+ // tasks are still queued up.
+ DoIdleWork();
+ if (!next_delayed_work_time.is_null()) {
+ ScheduleDelayedWork(next_delayed_work_time);
+ }
+ }
+}
+
+void MessagePumpForUI::DoIdleWork() {
+ if (delegate_->DoIdleWork()) {
+ // If DoIdleWork() resulted in any work, we're not idle yet. We need to pump
+ // the loop here because we may in fact be idle after doing idle work
+ // without any new tasks being queued.
+ ScheduleWork();
+ }
}
void MessagePumpForUI::Run(Delegate* delegate) {
- NOTREACHED() << "UnitTests should rely on MessagePumpForUIStub in"
- " test_stub_android.h";
+ DCHECK(IsTestImplementation());
+ // This function is only called in tests. We manually pump the native looper
+ // which won't run any java tasks.
+ quit_ = false;
+
+ SetDelegate(delegate);
+
+ // Pump the loop once in case we're starting off idle as ALooper_pollOnce will
+ // never return in that case.
+ ScheduleWork();
+ while (true) {
+ // Waits for either the delayed, or non-delayed fds to be signalled, calling
+ // either OnDelayedLooperCallback, or OnNonDelayedLooperCallback,
+ // respectively. This uses Android's Looper implementation, which is based
+ // off of epoll.
+ ALooper_pollOnce(-1, nullptr, nullptr, nullptr);
+ if (quit_)
+ break;
+ }
}
-void MessagePumpForUI::Start(Delegate* delegate) {
+void MessagePumpForUI::Attach(Delegate* delegate) {
DCHECK(!quit_);
- delegate_ = delegate;
+
+ // Since the Looper is controlled by the UI thread or JavaHandlerThread, we
+ // can't use Run() like we do on other platforms or we would prevent Java
+ // tasks from running. Instead we create and initialize a run loop here, then
+ // return control back to the Looper.
+
+ SetDelegate(delegate);
run_loop_ = std::make_unique<RunLoop>();
// Since the RunLoop was just created above, BeforeRun should be guaranteed to
// return true (it only returns false if the RunLoop has been Quit already).
if (!run_loop_->BeforeRun())
NOTREACHED();
-
- DCHECK(system_message_handler_obj_.is_null());
-
- JNIEnv* env = base::android::AttachCurrentThread();
- DCHECK(env);
- system_message_handler_obj_.Reset(
- Java_SystemMessageHandler_create(env, reinterpret_cast<jlong>(this)));
}
void MessagePumpForUI::Quit() {
- quit_ = true;
+ if (quit_)
+ return;
- if (!system_message_handler_obj_.is_null()) {
- JNIEnv* env = base::android::AttachCurrentThread();
- DCHECK(env);
+ quit_ = true;
- Java_SystemMessageHandler_shutdown(env, system_message_handler_obj_);
- system_message_handler_obj_.Reset();
- }
+ int64_t value;
+ // Clear any pending timer.
+ read(delayed_fd_, &value, sizeof(value));
+ // Clear the eventfd.
+ read(non_delayed_fd_, &value, sizeof(value));
if (run_loop_) {
run_loop_->AfterRun();
run_loop_ = nullptr;
}
+ if (on_quit_callback_) {
+ std::move(on_quit_callback_).Run();
+ }
}
void MessagePumpForUI::ScheduleWork() {
- if (quit_)
+ if (ShouldQuit())
return;
- DCHECK(!system_message_handler_obj_.is_null());
-
- JNIEnv* env = base::android::AttachCurrentThread();
- DCHECK(env);
- Java_SystemMessageHandler_scheduleWork(env, system_message_handler_obj_);
+ // Write (add) 1 to the eventfd. This tells the Looper to wake up and call our
+ // callback, allowing us to run tasks. This also allows us to detect, when we
+ // clear the fd, whether additional work was scheduled after we finished
+ // performing work, but before we cleared the fd, as we'll read back >=2
+ // instead of 1 in that case.
+ // See the eventfd man pages
+ // (http://man7.org/linux/man-pages/man2/eventfd.2.html) for details on how
+ // the read and write APIs for this file descriptor work, specifically without
+ // EFD_SEMAPHORE.
+ uint64_t value = 1;
+ int ret = write(non_delayed_fd_, &value, sizeof(value));
+ DCHECK_GE(ret, 0);
}
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
- if (quit_)
+ if (ShouldQuit())
return;
- // In the java side, |SystemMessageHandler| keeps a single "delayed" message.
- // It's an expensive operation to |removeMessage| there, so this is optimized
- // to avoid those calls.
- //
- // At this stage, |delayed_work_time| can be:
- // 1) The same as previously scheduled: nothing to be done, move along. This
- // is the typical case, since this method is called for every single message.
- //
- // 2) Not previously scheduled: just post a new message in java.
- //
- // 3) Shorter than previously scheduled: far less common. In this case,
- // |removeMessage| and post a new one.
- //
- // 4) Longer than previously scheduled (or null): nothing to be done, move
- // along.
+
if (!delayed_scheduled_time_.is_null() &&
delayed_work_time >= delayed_scheduled_time_) {
return;
}
+
DCHECK(!delayed_work_time.is_null());
- DCHECK(!system_message_handler_obj_.is_null());
+ delayed_scheduled_time_ = delayed_work_time;
+ int64_t nanos = delayed_work_time.since_origin().InNanoseconds();
+ struct itimerspec ts;
+ ts.it_interval.tv_sec = 0; // Don't repeat.
+ ts.it_interval.tv_nsec = 0;
+ ts.it_value.tv_sec = nanos / TimeTicks::kNanosecondsPerSecond;
+ ts.it_value.tv_nsec = nanos % TimeTicks::kNanosecondsPerSecond;
- JNIEnv* env = base::android::AttachCurrentThread();
- DCHECK(env);
+ int ret = timerfd_settime(delayed_fd_, TFD_TIMER_ABSTIME, &ts, nullptr);
+ DCHECK_GE(ret, 0);
+}
- jlong millis =
- (delayed_work_time - TimeTicks::Now()).InMillisecondsRoundedUp();
- delayed_scheduled_time_ = delayed_work_time;
- // Note that we're truncating to milliseconds as required by the java side,
- // even though delayed_work_time is microseconds resolution.
- Java_SystemMessageHandler_scheduleDelayedWork(
- env, system_message_handler_obj_, millis);
+void MessagePumpForUI::QuitWhenIdle(base::OnceClosure callback) {
+ DCHECK(!on_quit_callback_);
+ DCHECK(run_loop_);
+ on_quit_callback_ = std::move(callback);
+ run_loop_->QuitWhenIdle();
+ // Pump the loop in case we're already idle.
+ ScheduleWork();
+}
+
+bool MessagePumpForUI::IsTestImplementation() const {
+ return false;
}
} // namespace base
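The non-delayed wake-up path above depends entirely on eventfd counter semantics: each write() adds to a 64-bit counter, and a read() without EFD_SEMAPHORE returns the accumulated value and resets it to zero, which is why reading back a value greater than one means several ScheduleWork() calls were coalesced. A minimal Linux-only demonstration, independent of Chromium:

  #include <stdint.h>
  #include <sys/eventfd.h>
  #include <unistd.h>
  #include <cstdio>

  int main() {
    int fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    uint64_t one = 1;
    write(fd, &one, sizeof(one));  // First wake-up request.
    write(fd, &one, sizeof(one));  // Second request before anyone reads.
    uint64_t value = 0;
    read(fd, &value, sizeof(value));  // Returns 2 and resets the counter.
    std::printf("coalesced wake-ups: %llu\n",
                static_cast<unsigned long long>(value));
    close(fd);
    return 0;
  }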
diff --git a/chromium/base/message_loop/message_pump_android.h b/chromium/base/message_loop/message_pump_android.h
index d09fdde789f..d7e0f50fde2 100644
--- a/chromium/base/message_loop/message_pump_android.h
+++ b/chromium/base/message_loop/message_pump_android.h
@@ -10,11 +10,14 @@
#include "base/android/scoped_java_ref.h"
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/message_loop/message_pump.h"
#include "base/time/time.h"
+struct ALooper;
+
namespace base {
class RunLoop;
@@ -26,33 +29,71 @@ class BASE_EXPORT MessagePumpForUI : public MessagePump {
MessagePumpForUI();
~MessagePumpForUI() override;
- void DoIdleWork(JNIEnv* env, const base::android::JavaParamRef<jobject>& obj);
- void DoRunLoopOnce(JNIEnv* env,
- const base::android::JavaParamRef<jobject>& obj,
- jboolean delayed);
-
void Run(Delegate* delegate) override;
void Quit() override;
void ScheduleWork() override;
void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
- virtual void Start(Delegate* delegate);
+ // Attaches |delegate| to this native MessagePump. |delegate| will from then
+ // on be invoked by the native loop to process application tasks.
+ virtual void Attach(Delegate* delegate);
// We call Abort when there is a pending JNI exception, meaning that the
// current thread will crash when we return to Java.
// We can't call any JNI-methods before returning to Java as we would then
// cause a native crash (instead of the original Java crash).
void Abort() { should_abort_ = true; }
- bool ShouldAbort() const { return should_abort_; }
+ bool IsAborted() { return should_abort_; }
+ bool ShouldQuit() const { return should_abort_ || quit_; }
+
+ // Tells the RunLoop to quit when idle, calling the callback when it's safe
+ // for the Thread to stop.
+ void QuitWhenIdle(base::OnceClosure callback);
+
+ // These functions are only public so that the looper callbacks can call them,
+ // and should not be called from outside this class.
+ void OnDelayedLooperCallback();
+ void OnNonDelayedLooperCallback();
+
+ protected:
+ void SetDelegate(Delegate* delegate) { delegate_ = delegate; }
+ virtual bool IsTestImplementation() const;
private:
+ void DoIdleWork();
+
+ // Unlike other platforms, we don't control the message loop as it's
+ // controlled by the Android Looper, so we can't run a RunLoop to keep the
+ // Thread this pump belongs to alive. However, threads are expected to have an
+ // active run loop, so we manage a RunLoop internally here, starting/stopping
+ // it as necessary.
std::unique_ptr<RunLoop> run_loop_;
- base::android::ScopedJavaGlobalRef<jobject> system_message_handler_obj_;
+
+ // See Abort().
bool should_abort_ = false;
+
+ // Whether this message pump is quitting, or has quit.
bool quit_ = false;
+
+ // The MessageLoop::Delegate for this pump.
Delegate* delegate_ = nullptr;
+
+ // The time at which we are currently scheduled to wake up and perform a
+ // delayed task.
base::TimeTicks delayed_scheduled_time_;
+ // If set, a callback to fire when the message pump is quit.
+ base::OnceClosure on_quit_callback_;
+
+ // The file descriptor used to signal that non-delayed work is available.
+ int non_delayed_fd_;
+
+ // The file descriptor used to signal that delayed work is available.
+ int delayed_fd_;
+
+ // The Android Looper for this thread.
+ ALooper* looper_ = nullptr;
+
DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
};
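ScheduleDelayedWork() arms the timerfd with an absolute CLOCK_MONOTONIC deadline taken from TimeTicks::since_origin(), relying on the LINUX_CLOCK_MONOTONIC check in the constructor. On desktop Linux, where <sys/timerfd.h> is available (unlike the NDK situation described in the .cc above), the equivalent arming step can be sketched as follows; ArmOneShotTimer is an illustrative name, not Chromium API:

  #include <stdint.h>
  #include <sys/timerfd.h>
  #include <unistd.h>

  // |timer_fd| would come from
  // timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
  // |wake_time_nanos| is nanoseconds since the monotonic clock's origin.
  int ArmOneShotTimer(int timer_fd, int64_t wake_time_nanos) {
    struct itimerspec ts = {};  // Zero it_interval: fire once, do not repeat.
    ts.it_value.tv_sec = wake_time_nanos / 1000000000;
    ts.it_value.tv_nsec = wake_time_nanos % 1000000000;
    return timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &ts, nullptr);
  }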
diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc
index b9af6433dd7..91585fcd539 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.cc
+++ b/chromium/base/message_loop/message_pump_fuchsia.cc
@@ -4,8 +4,8 @@
#include "base/message_loop/message_pump_fuchsia.h"
-#include <fdio/io.h>
-#include <fdio/private.h>
+#include <lib/fdio/io.h>
+#include <lib/fdio/private.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
diff --git a/chromium/base/message_loop/message_pump_glib_unittest.cc b/chromium/base/message_loop/message_pump_glib_unittest.cc
index 70be2a4f74c..512cea63125 100644
--- a/chromium/base/message_loop/message_pump_glib_unittest.cc
+++ b/chromium/base/message_loop/message_pump_glib_unittest.cc
@@ -191,8 +191,9 @@ TEST_F(MessagePumpGLibTest, TestQuit) {
injector()->Reset();
// Quit from an event
- injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
- RunLoop().Run();
+ RunLoop run_loop;
+ injector()->AddEvent(0, run_loop.QuitClosure());
+ run_loop.Run();
EXPECT_EQ(1, injector()->processed_events());
}
@@ -211,8 +212,11 @@ TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task));
injector()->AddEventAsTask(0, std::move(posted_task));
injector()->AddEventAsTask(0, DoNothing());
- injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
- RunLoop().Run();
+ {
+ RunLoop run_loop;
+ injector()->AddEvent(0, run_loop.QuitClosure());
+ run_loop.Run();
+ }
EXPECT_EQ(4, injector()->processed_events());
injector()->Reset();
@@ -222,8 +226,11 @@ TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task));
injector()->AddEventAsTask(0, std::move(posted_task));
injector()->AddEventAsTask(10, DoNothing());
- injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
- RunLoop().Run();
+ {
+ RunLoop run_loop;
+ injector()->AddEvent(0, run_loop.QuitClosure());
+ run_loop.Run();
+ }
EXPECT_EQ(4, injector()->processed_events());
}
@@ -237,10 +244,13 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
}
// After all the previous tasks have executed, enqueue an event that will
// quit.
- loop()->task_runner()->PostTask(
- FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0,
- RunLoop::QuitCurrentWhenIdleClosureDeprecated()));
- RunLoop().Run();
+ {
+ RunLoop run_loop;
+ loop()->task_runner()->PostTask(
+ FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0,
+ run_loop.QuitClosure()));
+ run_loop.Run();
+ }
ASSERT_EQ(10, task_count);
EXPECT_EQ(1, injector()->processed_events());
@@ -256,12 +266,15 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
// quit.
// This relies on the fact that delayed tasks are executed in delay order.
// That is verified in message_loop_unittest.cc.
- loop()->task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(&EventInjector::AddEvent, Unretained(injector()), 10,
- RunLoop::QuitCurrentWhenIdleClosureDeprecated()),
- TimeDelta::FromMilliseconds(150));
- RunLoop().Run();
+ {
+ RunLoop run_loop;
+ loop()->task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0,
+ run_loop.QuitClosure()),
+ TimeDelta::FromMilliseconds(150));
+ run_loop.Run();
+ }
ASSERT_EQ(10, task_count);
EXPECT_EQ(1, injector()->processed_events());
}
@@ -282,8 +295,9 @@ TEST_F(MessagePumpGLibTest, TestEventsWhileWaitingForWork) {
injector()->AddEventAsTask(10, std::move(posted_task));
// And then quit (relies on the condition tested by TestEventTaskInterleave).
- injector()->AddEvent(10, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
- RunLoop().Run();
+ RunLoop run_loop;
+ injector()->AddEvent(10, run_loop.QuitClosure());
+ run_loop.Run();
EXPECT_EQ(12, injector()->processed_events());
}
@@ -295,18 +309,18 @@ namespace {
// while making sure there is always work to do and events in the queue.
class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
public:
- explicit ConcurrentHelper(EventInjector* injector)
+ ConcurrentHelper(EventInjector* injector, OnceClosure done_closure)
: injector_(injector),
+ done_closure_(std::move(done_closure)),
event_count_(kStartingEventCount),
- task_count_(kStartingTaskCount) {
- }
+ task_count_(kStartingTaskCount) {}
void FromTask() {
if (task_count_ > 0) {
--task_count_;
}
if (task_count_ == 0 && event_count_ == 0) {
- RunLoop::QuitCurrentWhenIdleDeprecated();
+ std::move(done_closure_).Run();
} else {
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, this));
@@ -318,7 +332,7 @@ class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
--event_count_;
}
if (task_count_ == 0 && event_count_ == 0) {
- RunLoop::QuitCurrentWhenIdleDeprecated();
+ std::move(done_closure_).Run();
} else {
injector_->AddEventAsTask(0,
BindOnce(&ConcurrentHelper::FromEvent, this));
@@ -337,6 +351,7 @@ class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
static const int kStartingTaskCount = 20;
EventInjector* injector_;
+ OnceClosure done_closure_;
int event_count_;
int task_count_;
};
@@ -349,7 +364,9 @@ TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
// full, the helper verifies that both tasks and events get processed.
// If that is not the case, either event_count_ or task_count_ will not get
// to 0, and MessageLoop::QuitWhenIdle() will never be called.
- scoped_refptr<ConcurrentHelper> helper = new ConcurrentHelper(injector());
+ RunLoop run_loop;
+ scoped_refptr<ConcurrentHelper> helper =
+ new ConcurrentHelper(injector(), run_loop.QuitClosure());
// Add 2 events to the queue to make sure it is always full (when we remove
// the event before processing it).
@@ -362,19 +379,19 @@ TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
loop()->task_runner()->PostTask(
FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper));
- RunLoop().Run();
+ run_loop.Run();
EXPECT_EQ(0, helper->event_count());
EXPECT_EQ(0, helper->task_count());
}
namespace {
-void AddEventsAndDrainGLib(EventInjector* injector) {
+void AddEventsAndDrainGLib(EventInjector* injector, OnceClosure on_drained) {
// Add a couple of dummy events
injector->AddDummyEvent(0);
injector->AddDummyEvent(0);
// Then add an event that will quit the main loop.
- injector->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+ injector->AddEvent(0, std::move(on_drained));
// Post a couple of dummy tasks
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing());
@@ -390,9 +407,11 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
// Tests that draining events using GLib works.
+ RunLoop run_loop;
loop()->task_runner()->PostTask(
- FROM_HERE, BindOnce(&AddEventsAndDrainGLib, Unretained(injector())));
- RunLoop().Run();
+ FROM_HERE, BindOnce(&AddEventsAndDrainGLib, Unretained(injector()),
+ run_loop.QuitClosure()));
+ run_loop.Run();
EXPECT_EQ(3, injector()->processed_events());
}
@@ -432,7 +451,7 @@ class GLibLoopRunner : public RefCounted<GLibLoopRunner> {
bool quit_;
};
-void TestGLibLoopInternal(EventInjector* injector) {
+void TestGLibLoopInternal(EventInjector* injector, OnceClosure done) {
// Allow tasks to be processed from 'native' event loops.
MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
@@ -462,10 +481,10 @@ void TestGLibLoopInternal(EventInjector* injector) {
ASSERT_EQ(3, task_count);
EXPECT_EQ(4, injector->processed_events());
- RunLoop::QuitCurrentWhenIdleDeprecated();
+ std::move(done).Run();
}
-void TestGtkLoopInternal(EventInjector* injector) {
+void TestGtkLoopInternal(EventInjector* injector, OnceClosure done) {
// Allow tasks to be processed from 'native' event loops.
MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
@@ -495,7 +514,7 @@ void TestGtkLoopInternal(EventInjector* injector) {
ASSERT_EQ(3, task_count);
EXPECT_EQ(4, injector->processed_events());
- RunLoop::QuitCurrentWhenIdleDeprecated();
+ std::move(done).Run();
}
} // namespace
@@ -505,9 +524,11 @@ TEST_F(MessagePumpGLibTest, TestGLibLoop) {
// loop is not run by MessageLoop::Run() but by a straight GLib loop.
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
+ RunLoop run_loop;
loop()->task_runner()->PostTask(
- FROM_HERE, BindOnce(&TestGLibLoopInternal, Unretained(injector())));
- RunLoop().Run();
+ FROM_HERE, BindOnce(&TestGLibLoopInternal, Unretained(injector()),
+ run_loop.QuitClosure()));
+ run_loop.Run();
}
TEST_F(MessagePumpGLibTest, TestGtkLoop) {
@@ -515,9 +536,11 @@ TEST_F(MessagePumpGLibTest, TestGtkLoop) {
// loop is not run by MessageLoop::Run() but by a straight Gtk loop.
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
+ RunLoop run_loop;
loop()->task_runner()->PostTask(
- FROM_HERE, BindOnce(&TestGtkLoopInternal, Unretained(injector())));
- RunLoop().Run();
+ FROM_HERE, BindOnce(&TestGtkLoopInternal, Unretained(injector()),
+ run_loop.QuitClosure()));
+ run_loop.Run();
}
} // namespace base
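All of the edits in this file follow one pattern: replace RunLoop::QuitCurrentWhenIdleClosureDeprecated() with a RunLoop owned by the test and its QuitClosure(). In isolation the pattern looks like the fragment below; EventInjector is the helper defined earlier in this test file, so this is a sketch rather than a complete test:

  void RunUntilInjectedEventQuits(EventInjector* injector) {
    base::RunLoop run_loop;
    injector->AddEvent(0, run_loop.QuitClosure());  // Quit comes from an event.
    run_loop.Run();  // Returns once the injected event runs the closure.
  }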
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index 76f18cb467e..71ed4912d84 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -240,67 +240,4 @@ TEST_F(ScheduleWorkTest, ThreadTimeToJavaFromFourThreads) {
}
#endif
-class FakeMessagePump : public MessagePump {
- public:
- FakeMessagePump() = default;
- ~FakeMessagePump() override = default;
-
- void Run(Delegate* delegate) override {}
-
- void Quit() override {}
- void ScheduleWork() override {}
- void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override {}
-};
-
-class PostTaskTest : public testing::Test {
- public:
- void Run(int batch_size, int tasks_per_reload) {
- base::TimeTicks start = base::TimeTicks::Now();
- base::TimeTicks now;
- MessageLoop loop(std::unique_ptr<MessagePump>(new FakeMessagePump));
- scoped_refptr<internal::IncomingTaskQueue> queue(
- new internal::IncomingTaskQueue(&loop));
- uint32_t num_posted = 0;
- do {
- for (int i = 0; i < batch_size; ++i) {
- for (int j = 0; j < tasks_per_reload; ++j) {
- queue->AddToIncomingQueue(FROM_HERE, DoNothing(), base::TimeDelta(),
- Nestable::kNonNestable);
- num_posted++;
- }
- TaskQueue loop_local_queue;
- queue->ReloadWorkQueue(&loop_local_queue);
- while (!loop_local_queue.empty()) {
- PendingTask t = std::move(loop_local_queue.front());
- loop_local_queue.pop();
- loop.RunTask(&t);
- }
- }
-
- now = base::TimeTicks::Now();
- } while (now - start < base::TimeDelta::FromSeconds(5));
- std::string trace = StringPrintf("%d_tasks_per_reload", tasks_per_reload);
- perf_test::PrintResult(
- "task",
- "",
- trace,
- (now - start).InMicroseconds() / static_cast<double>(num_posted),
- "us/task",
- true);
- queue->WillDestroyCurrentMessageLoop();
- }
-};
-
-TEST_F(PostTaskTest, OneTaskPerReload) {
- Run(10000, 1);
-}
-
-TEST_F(PostTaskTest, TenTasksPerReload) {
- Run(10000, 10);
-}
-
-TEST_F(PostTaskTest, OneHundredTasksPerReload) {
- Run(1000, 100);
-}
-
} // namespace base
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index 68bb4c08465..c95f85abfee 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -46,10 +46,6 @@ void MessagePumpWin::Run(Delegate* delegate) {
s.should_quit = false;
s.run_depth = state_ ? state_->run_depth + 1 : 1;
- // TODO(stanisc): crbug.com/596190: Remove this code once the bug is fixed.
- s.schedule_work_error_count = 0;
- s.last_schedule_work_error_time = Time();
-
RunState* previous_state = state_;
state_ = &s;
@@ -118,8 +114,6 @@ void MessagePumpForUI::ScheduleWork() {
InterlockedExchange(&work_state_, READY);
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
- state_->schedule_work_error_count++;
- state_->last_schedule_work_error_time = Time::Now();
}
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
@@ -351,7 +345,7 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() {
}
bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
- TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
+ TRACE_EVENT1("base,toplevel", "MessagePumpForUI::ProcessMessageHelper",
"message", msg.message);
if (WM_QUIT == msg.message) {
if (enable_wm_quit_) {
@@ -444,8 +438,6 @@ void MessagePumpForIO::ScheduleWork() {
InterlockedExchange(&work_state_, READY); // Clarify that we didn't succeed.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
- state_->schedule_work_error_count++;
- state_->last_schedule_work_error_time = Time::Now();
}
void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
@@ -455,11 +447,11 @@ void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
delayed_work_time_ = delayed_work_time;
}
-void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
- IOHandler* handler) {
+HRESULT MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
+ IOHandler* handler) {
HANDLE port = CreateIoCompletionPort(file_handle, port_.Get(),
reinterpret_cast<ULONG_PTR>(handler), 1);
- DPCHECK(port);
+ return (port != nullptr) ? S_OK : HRESULT_FROM_WIN32(GetLastError());
}
bool MessagePumpForIO::RegisterJobObject(HANDLE job_handle,
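RegisterIOHandler() now reports failure as an HRESULT derived from GetLastError() instead of DPCHECK-ing inside the pump. A hypothetical caller (not part of this patch) would handle it along these lines:

  HRESULT hr = pump->RegisterIOHandler(file_handle, handler);
  if (FAILED(hr)) {
    // CreateIoCompletionPort failed; propagate the error instead of crashing.
    return hr;
  }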
diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h
index c1f96bd1e24..900fcc0e380 100644
--- a/chromium/base/message_loop/message_pump_win.h
+++ b/chromium/base/message_loop/message_pump_win.h
@@ -38,11 +38,6 @@ class BASE_EXPORT MessagePumpWin : public MessagePump {
// Used to count how many Run() invocations are on the stack.
int run_depth;
-
- // Used to help diagnose hangs.
- // TODO(stanisc): crbug.com/596190: Remove these once the bug is fixed.
- int schedule_work_error_count;
- Time last_schedule_work_error_time;
};
// State used with |work_state_| variable.
@@ -216,7 +211,7 @@ class BASE_EXPORT MessagePumpForIO : public MessagePumpWin {
// Register the handler to be used when asynchronous IO for the given file
// completes. The registration persists as long as |file_handle| is valid, so
// |handler| must be valid as long as there is pending IO for the given file.
- void RegisterIOHandler(HANDLE file_handle, IOHandler* handler);
+ HRESULT RegisterIOHandler(HANDLE file_handle, IOHandler* handler);
// Register the handler to be used to process job events. The registration
// persists as long as the job object is live, so |handler| must be valid
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index 3f7cc309ac7..7dbe737d8a8 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -1171,7 +1171,14 @@ TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
"");
}
-TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/752368): This is flaky on Fuchsia.
+#define MAYBE_TestCopyFieldTrialStateToFlags \
+ DISABLED_TestCopyFieldTrialStateToFlags
+#else
+#define MAYBE_TestCopyFieldTrialStateToFlags TestCopyFieldTrialStateToFlags
+#endif
+TEST(FieldTrialListTest, MAYBE_TestCopyFieldTrialStateToFlags) {
constexpr char kFieldTrialHandleSwitch[] = "test-field-trial-handle";
constexpr char kEnableFeaturesSwitch[] = "test-enable-features";
constexpr char kDisableFeaturesSwitch[] = "test-disable-features";
@@ -1320,7 +1327,13 @@ TEST(FieldTrialListTest, AssociateFieldTrialParams) {
EXPECT_EQ(2U, new_params.size());
}
-TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/752368): This is flaky on Fuchsia.
+#define MAYBE_ClearParamsFromSharedMemory DISABLED_ClearParamsFromSharedMemory
+#else
+#define MAYBE_ClearParamsFromSharedMemory ClearParamsFromSharedMemory
+#endif
+TEST(FieldTrialListTest, MAYBE_ClearParamsFromSharedMemory) {
std::string trial_name("Trial1");
std::string group_name("Group1");
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 7e93c674be3..2ea1f884c6c 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -32,7 +32,6 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
-#include "base/sys_info.h"
#include "base/values.h"
#include "build/build_config.h"
@@ -568,36 +567,6 @@ void Histogram::ValidateHistogramContents() const {
CHECK(unlogged_samples_->bucket_ranges());
CHECK(logged_samples_);
CHECK(logged_samples_->bucket_ranges());
-#if !defined(OS_NACL)
- if (0U == logged_samples_->id() && (flags() & kIsPersistent)) {
- // ID should never be zero. If it is, then it's probably because the
- // entire memory page was cleared. Check that this is true.
- // TODO(bcwhite): Remove this.
- // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
- size_t page_size = SysInfo::VMAllocationGranularity();
- if (page_size == 0)
- page_size = 1024;
- const int* address = reinterpret_cast<const int*>(
- reinterpret_cast<uintptr_t>(logged_samples_->meta()) &
- ~(page_size - 1));
- // Check a couple places so there is evidence in a crash report as to
- // where it was non-zero.
- CHECK_EQ(0, address[0]);
- CHECK_EQ(0, address[1]);
- CHECK_EQ(0, address[2]);
- CHECK_EQ(0, address[4]);
- CHECK_EQ(0, address[8]);
- CHECK_EQ(0, address[16]);
- CHECK_EQ(0, address[32]);
- CHECK_EQ(0, address[64]);
- CHECK_EQ(0, address[128]);
- CHECK_EQ(0, address[256]);
- CHECK_EQ(0, address[512]);
- // Now check every address.
- for (size_t i = 0; i < page_size / sizeof(int); ++i)
- CHECK_EQ(0, address[i]);
- }
-#endif
CHECK_NE(0U, logged_samples_->id());
}
@@ -1078,16 +1047,18 @@ ScaledLinearHistogram::ScaledLinearHistogram(const char* name,
ScaledLinearHistogram::~ScaledLinearHistogram() = default;
void ScaledLinearHistogram::AddScaledCount(Sample value, int count) {
+ if (count == 0)
+ return;
+ if (count < 0) {
+ NOTREACHED();
+ return;
+ }
const int32_t max_value =
static_cast<int32_t>(histogram_->bucket_count() - 1);
if (value > max_value)
value = max_value;
if (value < 0)
value = 0;
- if (count <= 0) {
- NOTREACHED();
- return;
- }
int scaled_count = count / scale_;
subtle::Atomic32 remainder = count - scaled_count * scale_;
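After this reordering, a zero count is a silent no-op and only negative counts hit NOTREACHED(). The split into scaled_count and remainder is plain integer arithmetic; a standalone worked example with a hypothetical scale_ of 1000:

  #include <cstdio>

  int main() {
    const int scale = 1000;   // Hypothetical scale_ value.
    const int count = 2750;
    const int scaled_count = count / scale;              // 2 whole samples.
    const int remainder = count - scaled_count * scale;  // 750 carried forward.
    std::printf("scaled=%d remainder=%d\n", scaled_count, remainder);
    return 0;
  }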
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index da3ae931d1c..990d9f5e043 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -18,6 +18,7 @@
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
@@ -113,7 +114,7 @@ void HistogramBase::AddKiB(Sample value, int count) {
}
void HistogramBase::AddTimeMillisecondsGranularity(const TimeDelta& time) {
- Add(static_cast<Sample>(time.InMilliseconds()));
+ Add(saturated_cast<Sample>(time.InMilliseconds()));
}
void HistogramBase::AddTimeMicrosecondsGranularity(const TimeDelta& time) {
@@ -121,7 +122,7 @@ void HistogramBase::AddTimeMicrosecondsGranularity(const TimeDelta& time) {
// clocks. High-resolution metrics cannot make use of low-resolution data and
// reporting it merely adds noise to the metric. https://crbug.com/807615#c16
if (TimeTicks::IsHighResolution())
- Add(static_cast<Sample>(time.InMicroseconds()));
+ Add(saturated_cast<Sample>(time.InMicroseconds()));
}
void HistogramBase::AddBoolean(bool value) {
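The switch from static_cast to saturated_cast is what the overflow tests added in histogram_base_unittest.cc below exercise: a TimeDelta whose millisecond or microsecond count exceeds the 32-bit Sample range now clamps to the range limits instead of wrapping. An illustrative fragment (base::saturated_cast comes from the safe_conversions.h include added above):

  #include <cstdint>
  #include <limits>
  #include "base/numerics/safe_conversions.h"

  const int64_t big = std::numeric_limits<int64_t>::max();
  const int32_t wrapped = static_cast<int32_t>(big);           // Wraps (typically -1).
  const int32_t clamped = base::saturated_cast<int32_t>(big);  // INT32_MAX.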
diff --git a/chromium/base/metrics/histogram_base_unittest.cc b/chromium/base/metrics/histogram_base_unittest.cc
index e539e5cb2e9..0314ef4d61e 100644
--- a/chromium/base/metrics/histogram_base_unittest.cc
+++ b/chromium/base/metrics/histogram_base_unittest.cc
@@ -186,4 +186,90 @@ TEST_F(HistogramBaseTest, AddKiB) {
EXPECT_GE(2, samples->GetCount(300));
}
+TEST_F(HistogramBaseTest, AddTimeMillisecondsGranularityOverflow) {
+ const HistogramBase::Sample sample_max =
+ std::numeric_limits<HistogramBase::Sample>::max() / 2;
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ "TestAddTimeMillisecondsGranularity1", 1, sample_max, 100, 0);
+ int64_t large_positive = std::numeric_limits<int64_t>::max();
+ // |add_count| is the number of large values that have been added to the
+ // histogram. We consider a number to be 'large' if it cannot be represented
+ // in a HistogramBase::Sample.
+ int add_count = 0;
+ while (large_positive > std::numeric_limits<HistogramBase::Sample>::max()) {
+ // Add the TimeDelta corresponding to |large_positive| milliseconds to the
+ // histogram.
+ histogram->AddTimeMillisecondsGranularity(
+ TimeDelta::FromMilliseconds(large_positive));
+ ++add_count;
+ // Reduce the value of |large_positive|. The choice of 7 here is
+ // arbitrary.
+ large_positive /= 7;
+ }
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+ // All of the reported values must have gone into the max overflow bucket.
+ EXPECT_EQ(add_count, samples->GetCount(sample_max));
+
+  // We now perform the analogous operations, this time with negative values of
+  // large absolute value.
+ histogram = LinearHistogram::FactoryGet("TestAddTimeMillisecondsGranularity2",
+ 1, sample_max, 100, 0);
+ int64_t large_negative = std::numeric_limits<int64_t>::min();
+ add_count = 0;
+ while (large_negative < std::numeric_limits<HistogramBase::Sample>::min()) {
+ histogram->AddTimeMillisecondsGranularity(
+ TimeDelta::FromMilliseconds(large_negative));
+ ++add_count;
+ large_negative /= 7;
+ }
+ samples = histogram->SnapshotSamples();
+ // All of the reported values must have gone into the min overflow bucket.
+ EXPECT_EQ(add_count, samples->GetCount(0));
+}
+
+TEST_F(HistogramBaseTest, AddTimeMicrosecondsGranularityOverflow) {
+ // Nothing to test if we don't have a high resolution clock.
+ if (!TimeTicks::IsHighResolution())
+ return;
+
+ const HistogramBase::Sample sample_max =
+ std::numeric_limits<HistogramBase::Sample>::max() / 2;
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ "TestAddTimeMicrosecondsGranularity1", 1, sample_max, 100, 0);
+ int64_t large_positive = std::numeric_limits<int64_t>::max();
+ // |add_count| is the number of large values that have been added to the
+ // histogram. We consider a number to be 'large' if it cannot be represented
+ // in a HistogramBase::Sample.
+ int add_count = 0;
+ while (large_positive > std::numeric_limits<HistogramBase::Sample>::max()) {
+ // Add the TimeDelta corresponding to |large_positive| microseconds to the
+ // histogram.
+ histogram->AddTimeMicrosecondsGranularity(
+ TimeDelta::FromMicroseconds(large_positive));
+ ++add_count;
+ // Reduce the value of |large_positive|. The choice of 7 here is
+ // arbitrary.
+ large_positive /= 7;
+ }
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+ // All of the reported values must have gone into the max overflow bucket.
+ EXPECT_EQ(add_count, samples->GetCount(sample_max));
+
+  // We now perform the analogous operations, this time with negative values of
+  // large absolute value.
+ histogram = LinearHistogram::FactoryGet("TestAddTimeMicrosecondsGranularity2",
+ 1, sample_max, 100, 0);
+ int64_t large_negative = std::numeric_limits<int64_t>::min();
+ add_count = 0;
+ while (large_negative < std::numeric_limits<HistogramBase::Sample>::min()) {
+ histogram->AddTimeMicrosecondsGranularity(
+ TimeDelta::FromMicroseconds(large_negative));
+ ++add_count;
+ large_negative /= 7;
+ }
+ samples = histogram->SnapshotSamples();
+ // All of the reported values must have gone into the min overflow bucket.
+ EXPECT_EQ(add_count, samples->GetCount(0));
+}
+
} // namespace base
diff --git a/chromium/base/metrics/histogram_functions_unittest.cc b/chromium/base/metrics/histogram_functions_unittest.cc
index 37206747572..32f439469bd 100644
--- a/chromium/base/metrics/histogram_functions_unittest.cc
+++ b/chromium/base/metrics/histogram_functions_unittest.cc
@@ -5,7 +5,7 @@
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/time/time.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/metrics/histogram_samples.h b/chromium/base/metrics/histogram_samples.h
index 059fd3c3bac..6908873cee3 100644
--- a/chromium/base/metrics/histogram_samples.h
+++ b/chromium/base/metrics/histogram_samples.h
@@ -159,11 +159,6 @@ class BASE_EXPORT HistogramSamples {
return subtle::NoBarrier_Load(&meta_->redundant_count);
}
- // Temporarily visible for crash debugging. Should be protected.
- // TODO(bcwhite): Move this back where it belongs.
- // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
- Metadata* meta() { return meta_; }
-
protected:
enum NegativeSampleReason {
SAMPLES_HAVE_LOGGED_BUT_NOT_SAMPLE,
@@ -201,6 +196,8 @@ class BASE_EXPORT HistogramSamples {
return meta_->single_sample;
}
+ Metadata* meta() { return meta_; }
+
private:
  // Depending on the derived class, meta values can come from local storage or
// external storage in which case HistogramSamples class cannot take ownership
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index bfbb44b9a13..cb01c410f5d 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -301,7 +301,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// but that doesn't work because the allocated block may have been
// aligned to the next boundary value.
HashMetricName(data->name) != data->samples_metadata.id) {
- NOTREACHED();
return nullptr;
}
return CreateHistogram(data);
@@ -343,7 +342,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
if (counts_bytes == 0) {
// |bucket_count| was out-of-range.
- NOTREACHED();
return nullptr;
}
@@ -374,7 +372,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
bucket_ranges->set_persistent_reference(ranges_ref);
} else {
// This should never happen but be tolerant if it does.
- NOTREACHED();
ranges_ref = PersistentMemoryAllocator::kReferenceNull;
}
}
@@ -425,9 +422,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return histogram;
}
- if (memory_allocator_->IsCorrupt())
- NOTREACHED() << memory_allocator_->Name() << " is corrupt!";
-
return nullptr;
}
@@ -460,7 +454,6 @@ void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
// so a future try, if successful, will get what was missed. If it
// continues to fail, some metric data will be lost but that is better
// than crashing.
- NOTREACHED();
return;
}
@@ -476,7 +469,6 @@ void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
if (!existing) {
// The above should never fail but if it does, no real harm is done.
// Some metric data will be lost but that is better than crashing.
- NOTREACHED();
return;
}
@@ -504,10 +496,8 @@ void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData* histogram_data_ptr) {
- if (!histogram_data_ptr) {
- NOTREACHED();
+ if (!histogram_data_ptr)
return nullptr;
- }
// Sparse histograms are quite different so handle them as a special case.
if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
@@ -547,16 +537,13 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
if (!ranges_data || histogram_bucket_count < 2 ||
histogram_bucket_count >= max_buckets ||
allocated_bytes < required_bytes) {
- NOTREACHED();
return nullptr;
}
std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
- if (!created_ranges) {
- NOTREACHED();
+ if (!created_ranges)
return nullptr;
- }
const BucketRanges* ranges =
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
created_ranges.release());
@@ -567,7 +554,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
if (counts_bytes == 0 ||
(counts_ref != 0 &&
memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
- NOTREACHED();
return nullptr;
}
@@ -623,7 +609,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
DCHECK(histogram);
break;
default:
- NOTREACHED();
+ return nullptr;
}
if (histogram) {
@@ -710,7 +696,6 @@ bool GlobalHistogramAllocator::CreateWithFile(
}
if (!mmfile->IsValid() ||
!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
- NOTREACHED() << file_path;
return false;
}
@@ -891,7 +876,6 @@ void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
new SharedMemory(handle, /*readonly=*/false));
if (!shm->Map(size) ||
!SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
- NOTREACHED();
return;
}
diff --git a/chromium/base/metrics/single_sample_metrics_unittest.cc b/chromium/base/metrics/single_sample_metrics_unittest.cc
index 5a6d1595207..d4d5913d638 100644
--- a/chromium/base/metrics/single_sample_metrics_unittest.cc
+++ b/chromium/base/metrics/single_sample_metrics_unittest.cc
@@ -7,7 +7,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/dummy_histogram.h"
#include "base/test/gtest_util.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
diff --git a/chromium/base/native_library_fuchsia.cc b/chromium/base/native_library_fuchsia.cc
index 1d74273cbcf..c50650c9853 100644
--- a/chromium/base/native_library_fuchsia.cc
+++ b/chromium/base/native_library_fuchsia.cc
@@ -5,7 +5,8 @@
#include "base/native_library.h"
#include <fcntl.h>
-#include <fdio/io.h>
+#include <lib/fdio/io.h>
+#include <lib/zx/vmo.h>
#include <stdio.h>
#include <zircon/dlfcn.h>
#include <zircon/status.h>
@@ -15,7 +16,6 @@
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/fuchsia/scoped_zx_handle.h"
#include "base/logging.h"
#include "base/path_service.h"
#include "base/posix/safe_strerror.h"
@@ -53,9 +53,9 @@ NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
return nullptr;
}
- base::ScopedZxHandle vmo;
- zx_status_t status =
- fdio_get_vmo_clone(library.GetPlatformFile(), vmo.receive());
+ zx::vmo vmo;
+ zx_status_t status = fdio_get_vmo_clone(library.GetPlatformFile(),
+ vmo.reset_and_get_address());
if (status != ZX_OK) {
if (error) {
error->message = base::StringPrintf("fdio_get_vmo_clone: %s",
diff --git a/chromium/base/nix/xdg_util.cc b/chromium/base/nix/xdg_util.cc
index 109624a1e5a..9ff4d88e830 100644
--- a/chromium/base/nix/xdg_util.cc
+++ b/chromium/base/nix/xdg_util.cc
@@ -84,6 +84,8 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
}
if (xdg_current_desktop == "Pantheon")
return DESKTOP_ENVIRONMENT_PANTHEON;
+ if (xdg_current_desktop == "XFCE")
+ return DESKTOP_ENVIRONMENT_XFCE;
}
// DESKTOP_SESSION was what everyone used in 2010.
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index 37629ef82fc..1470b90e25f 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -24,6 +24,7 @@
#include "base/test/scoped_task_environment.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -129,7 +130,9 @@ class AddRemoveThread : public PlatformThread::Delegate,
// After ready_ is signaled, loop_ is only accessed by the main test thread
// (i.e. not this thread) in particular by Quit() which causes Run() to
// return, and we "control" loop_ again.
- RunLoop().Run();
+ RunLoop run_loop;
+ quit_loop_ = run_loop.QuitClosure();
+ run_loop.Run();
delete loop_;
loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
delete this;
@@ -160,10 +163,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
}
// This function is only callable from the main thread.
- void Quit() {
- loop_->task_runner()->PostTask(
- FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
- }
+ void Quit() { std::move(quit_loop_).Run(); }
void Observe(int x) override {
count_observes_++;
@@ -191,6 +191,8 @@ class AddRemoveThread : public PlatformThread::Delegate,
bool do_notifies_; // Whether these threads should do notifications.
WaitableEvent* ready_;
+ base::OnceClosure quit_loop_;
+
base::WeakPtrFactory<AddRemoveThread> weak_factory_;
};
@@ -633,7 +635,13 @@ static void ThreadSafeObserverHarness(int num_threads,
}
}
-TEST(ObserverListThreadSafeTest, CrossThreadObserver) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/738275): This is flaky on Fuchsia.
+#define MAYBE_CrossThreadObserver DISABLED_CrossThreadObserver
+#else
+#define MAYBE_CrossThreadObserver CrossThreadObserver
+#endif
+TEST(ObserverListThreadSafeTest, MAYBE_CrossThreadObserver) {
// Use 7 observer threads. Notifications only come from
// the main thread.
ThreadSafeObserverHarness(7, false);
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index c1d11ca7a18..f6263b8b7cd 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -575,32 +575,32 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
}
constexpr const T* operator->() const {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr T* operator->() {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr const T& operator*() const & {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr T& operator*() & {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr const T&& operator*() const && {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
constexpr T&& operator*() && {
- CHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
@@ -636,7 +636,7 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
return storage_.is_populated_
- ? value()
+ ? storage_.value_
: static_cast<T>(std::forward<U>(default_value));
}
@@ -648,7 +648,7 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
return storage_.is_populated_
- ? std::move(value())
+ ? std::move(storage_.value_)
: static_cast<T>(std::forward<U>(default_value));
}
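For reference, the semantics of value_or() are unchanged by the edit above; a
minimal sketch with illustrative values:

    base::Optional<int> maybe;
    int a = maybe.value_or(42);  // 42: an empty Optional yields the default.
    maybe = 7;
    int b = maybe.value_or(42);  // 7: a populated Optional yields its value.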
diff --git a/chromium/base/path_service_unittest.cc b/chromium/base/path_service_unittest.cc
index 8fcd673ca3f..cf69ef111c1 100644
--- a/chromium/base/path_service_unittest.cc
+++ b/chromium/base/path_service_unittest.cc
@@ -30,7 +30,7 @@ bool ReturnsValidPath(int dir_type) {
// Some paths might not exist on some platforms in which case confirming
// |result| is true and !path.empty() is the best we can do.
bool check_path_exists = true;
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
// If chromium has never been started on this account, the cache path may not
// exist.
if (dir_type == DIR_CACHE)
@@ -117,7 +117,7 @@ TEST_F(PathServiceTest, Get) {
++key) {
EXPECT_PRED1(ReturnsValidPath, key);
}
-#elif defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
for (int key = PATH_POSIX_START + 1; key < PATH_POSIX_END;
++key) {
EXPECT_PRED1(ReturnsValidPath, key);
diff --git a/chromium/base/pending_task.h b/chromium/base/pending_task.h
index 495015ba661..b71a371f22d 100644
--- a/chromium/base/pending_task.h
+++ b/chromium/base/pending_task.h
@@ -44,9 +44,9 @@ struct BASE_EXPORT PendingTask {
// The time when the task should be run.
base::TimeTicks delayed_run_time;
- // Task backtrace. mutable so it can be set while annotating const PendingTask
- // objects from TaskAnnotator::DidQueueTask().
- mutable std::array<const void*, 4> task_backtrace = {};
+ // Chain of up-to-four symbols of the parent tasks which led to this one being
+ // posted.
+ std::array<const void*, 4> task_backtrace = {};
// Secondary sort key for run time.
int sequence_num = 0;
diff --git a/chromium/base/posix/eintr_wrapper.h b/chromium/base/posix/eintr_wrapper.h
index c0ffced59b8..0e6e4379535 100644
--- a/chromium/base/posix/eintr_wrapper.h
+++ b/chromium/base/posix/eintr_wrapper.h
@@ -19,7 +19,7 @@
#include "build/build_config.h"
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
#include <errno.h>
@@ -58,11 +58,11 @@
eintr_wrapper_result; \
})
-#else // !OS_POSIX || OS_FUCHSIA
+#else // !OS_POSIX
#define HANDLE_EINTR(x) (x)
#define IGNORE_EINTR(x) (x)
-#endif // !OS_POSIX || OS_FUCHSIA
+#endif // !OS_POSIX
#endif // BASE_POSIX_EINTR_WRAPPER_H_
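With the change above the wrappers also apply on Fuchsia. Typical usage, as a
sketch (the surrounding helper function is illustrative):

    #include <unistd.h>
    #include "base/posix/eintr_wrapper.h"

    ssize_t ReadSome(int fd, char* buf, size_t len) {
      // HANDLE_EINTR retries the call for as long as it fails with EINTR.
      ssize_t n = HANDLE_EINTR(read(fd, buf, len));
      // close() must not be retried; IGNORE_EINTR treats EINTR as success.
      IGNORE_EINTR(close(fd));
      return n;
    }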
diff --git a/chromium/base/power_monitor/power_monitor.cc b/chromium/base/power_monitor/power_monitor.cc
index 30e06a2a833..b0b92e6bda7 100644
--- a/chromium/base/power_monitor/power_monitor.cc
+++ b/chromium/base/power_monitor/power_monitor.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/power_monitor/power_monitor_source.h"
+#include "base/trace_event/trace_event.h"
namespace base {
@@ -20,6 +21,7 @@ PowerMonitor::PowerMonitor(std::unique_ptr<PowerMonitorSource> source)
}
PowerMonitor::~PowerMonitor() {
+ source_->Shutdown();
DCHECK_EQ(this, g_power_monitor);
g_power_monitor = nullptr;
}
@@ -53,11 +55,15 @@ void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) {
}
void PowerMonitor::NotifySuspend() {
+ TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifySuspend",
+ TRACE_EVENT_SCOPE_GLOBAL);
DVLOG(1) << "Power Suspending";
observers_->Notify(FROM_HERE, &PowerObserver::OnSuspend);
}
void PowerMonitor::NotifyResume() {
+ TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifyResume",
+ TRACE_EVENT_SCOPE_GLOBAL);
DVLOG(1) << "Power Resuming";
observers_->Notify(FROM_HERE, &PowerObserver::OnResume);
}
diff --git a/chromium/base/power_monitor/power_monitor.h b/chromium/base/power_monitor/power_monitor.h
index e025b324011..b8e02e50ead 100644
--- a/chromium/base/power_monitor/power_monitor.h
+++ b/chromium/base/power_monitor/power_monitor.h
@@ -27,7 +27,8 @@ class BASE_EXPORT PowerMonitor {
static PowerMonitor* Get();
// Add and remove an observer.
- // Can be called from any thread.
+ // Can be called from any thread. |observer| is notified on the sequence
+ // from which it was registered.
// Must not be called from within a notification callback.
void AddObserver(PowerObserver* observer);
void RemoveObserver(PowerObserver* observer);
@@ -44,7 +45,7 @@ class BASE_EXPORT PowerMonitor {
void NotifySuspend();
void NotifyResume();
- scoped_refptr<ObserverListThreadSafe<PowerObserver> > observers_;
+ scoped_refptr<ObserverListThreadSafe<PowerObserver>> observers_;
std::unique_ptr<PowerMonitorSource> source_;
DISALLOW_COPY_AND_ASSIGN(PowerMonitor);
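A sketch of a client of the clarified AddObserver() contract (illustrative;
assumes the existing PowerObserver interface with OnSuspend()/OnResume()):

    class SuspendLogger : public base::PowerObserver {
     public:
      SuspendLogger() {
        // Registered on the current sequence, so notifications arrive here.
        base::PowerMonitor::Get()->AddObserver(this);
      }
      ~SuspendLogger() { base::PowerMonitor::Get()->RemoveObserver(this); }

      void OnSuspend() override { DVLOG(1) << "Suspending"; }
      void OnResume() override { DVLOG(1) << "Resuming"; }
    };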
diff --git a/chromium/base/power_monitor/power_monitor_device_source.cc b/chromium/base/power_monitor/power_monitor_device_source.cc
index 5df58003375..f42065499f2 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source.cc
@@ -25,4 +25,9 @@ PowerMonitorDeviceSource::~PowerMonitorDeviceSource() {
#endif
}
+// PowerMonitorDeviceSource does not need to take any special action to ensure
+// that it doesn't call back into PowerMonitor after this phase of shutdown has
+// completed.
+void PowerMonitorDeviceSource::Shutdown() {}
+
} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor_device_source.h b/chromium/base/power_monitor/power_monitor_device_source.h
index 1e2c885fa45..fc19b2435f2 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.h
+++ b/chromium/base/power_monitor/power_monitor_device_source.h
@@ -28,6 +28,8 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
PowerMonitorDeviceSource();
~PowerMonitorDeviceSource() override;
+ void Shutdown() override;
+
#if defined(OS_MACOSX)
// Allocate system resources needed by the PowerMonitor class.
//
diff --git a/chromium/base/power_monitor/power_monitor_device_source_ios.mm b/chromium/base/power_monitor/power_monitor_device_source_ios.mm
index 3e86b2e246c..935fa29ae07 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_ios.mm
+++ b/chromium/base/power_monitor/power_monitor_device_source_ios.mm
@@ -9,8 +9,17 @@
namespace base {
bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
- NOTIMPLEMENTED();
+#if TARGET_IPHONE_SIMULATOR
return false;
+#else
+ UIDevice* currentDevice = [UIDevice currentDevice];
+ BOOL isCurrentAppMonitoringBattery = currentDevice.isBatteryMonitoringEnabled;
+ [UIDevice currentDevice].batteryMonitoringEnabled = YES;
+ UIDeviceBatteryState batteryState = [UIDevice currentDevice].batteryState;
+ currentDevice.batteryMonitoringEnabled = isCurrentAppMonitoringBattery;
+ DCHECK(batteryState != UIDeviceBatteryStateUnknown);
+ return batteryState == UIDeviceBatteryStateUnplugged;
+#endif
}
void PowerMonitorDeviceSource::PlatformInit() {
diff --git a/chromium/base/power_monitor/power_monitor_source.h b/chromium/base/power_monitor/power_monitor_source.h
index b69cbf8317d..dd22bb619de 100644
--- a/chromium/base/power_monitor/power_monitor_source.h
+++ b/chromium/base/power_monitor/power_monitor_source.h
@@ -31,15 +31,20 @@ class BASE_EXPORT PowerMonitorSource {
// Is the computer currently on battery power. Can be called on any thread.
bool IsOnBatteryPower();
+ // Called by PowerMonitor just before PowerMonitor destroys both itself and
+  // this instance. After returning from this call it is no longer safe for
+  // subclasses to call into PowerMonitor (e.g., via PowerMonitor::Get()). Hence,
+ // subclasses should take any necessary actions here to ensure that after
+ // return from this invocation they will no longer make any calls on
+ // PowerMonitor.
+ virtual void Shutdown() = 0;
+
protected:
friend class PowerMonitorTest;
// Friend function that is allowed to access the protected ProcessPowerEvent.
friend void ProcessPowerEventHelper(PowerEvent);
- // Get the process-wide PowerMonitorSource (if not present, returns NULL).
- static PowerMonitorSource* Get();
-
// ProcessPowerEvent should only be called from a single thread, most likely
// the UI thread or, in child processes, the IO thread.
static void ProcessPowerEvent(PowerEvent event_id);
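A sketch of how a source might satisfy the new Shutdown() contract
(illustrative; |listener_| stands in for whatever platform hook the source uses
to observe power events, and IsOnBatteryPowerImpl() is assumed to remain the
only other required override):

    class ExamplePowerSource : public base::PowerMonitorSource {
     public:
      void Shutdown() override {
        // Tear down the platform hook so that no further events can be
        // forwarded to PowerMonitor once this call returns.
        listener_.reset();
      }

     protected:
      bool IsOnBatteryPowerImpl() override { return false; }

     private:
      std::unique_ptr<PlatformListener> listener_;  // Hypothetical helper.
    };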
diff --git a/chromium/base/process/kill.h b/chromium/base/process/kill.h
index 005b72e2aa4..9acfb8a7388 100644
--- a/chromium/base/process/kill.h
+++ b/chromium/base/process/kill.h
@@ -92,7 +92,7 @@ BASE_EXPORT bool KillProcessGroup(ProcessHandle process_group_id);
BASE_EXPORT TerminationStatus GetTerminationStatus(ProcessHandle handle,
int* exit_code);
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
// Send a kill signal to the process and then wait for the process to exit
// and get the termination status.
//
@@ -116,7 +116,7 @@ BASE_EXPORT TerminationStatus GetKnownDeadTerminationStatus(
// and then reaps it.
BASE_EXPORT void EnsureProcessGetsReaped(Process process);
#endif // defined(OS_LINUX)
-#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX)
// Registers |process| to be asynchronously monitored for termination, forcibly
// terminated if necessary, and reaped on exit. The caller should have signalled
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index b4530b755a2..7a2def2ef43 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -24,7 +24,7 @@
#if defined(OS_WIN)
#include <windows.h>
#elif defined(OS_FUCHSIA)
-#include <launchpad/launchpad.h>
+#include <lib/fdio/spawn.h>
#include <zircon/types.h>
#endif
@@ -39,6 +39,10 @@ class CommandLine;
#if defined(OS_WIN)
typedef std::vector<HANDLE> HandlesToInheritVector;
#elif defined(OS_FUCHSIA)
+struct PathToTransfer {
+ base::FilePath path;
+ zx_handle_t handle;
+};
struct HandleToTransfer {
uint32_t id;
zx_handle_t handle;
@@ -187,29 +191,30 @@ struct BASE_EXPORT LaunchOptions {
zx_handle_t job_handle = ZX_HANDLE_INVALID;
// Specifies additional handles to transfer (not duplicate) to the child
- // process. The handles remain valid in this process if launch fails.
- // Each entry is an <id,handle> pair, with an |id| created using the PA_HND()
- // macro. The child retrieves the handle |zx_get_startup_handle(id)|.
+ // process. Each entry is an <id,handle> pair, with an |id| created using the
+  // PA_HND() macro. The child retrieves the handle with
+ // |zx_take_startup_handle(id)|. The supplied handles are consumed by
+ // LaunchProcess() even on failure.
HandlesToTransferVector handles_to_transfer;
- // If set, specifies which capabilities should be granted (cloned) to the
- // child process.
- // A zero value indicates that the child process will receive
- // no capabilities.
- // By default the child will inherit the same capabilities, job, and CWD
- // from the parent process.
- uint32_t clone_flags =
- LP_CLONE_FDIO_NAMESPACE | LP_CLONE_DEFAULT_JOB | LP_CLONE_FDIO_STDIO;
-
- // Specifies the namespace paths which are to be cloned in the child process'
- // namespace. If left unset, the child process will be launched with an empty
- // namespace.
- // This flag allows the parent to pass only the bare minimum OS capabilities
- // to the child process, so that the potential attack surface is reduced in
- // case child process is compromised.
- // Cannot be combined with the clone flag LP_CLONE_FDIO_NAMESPACE, which is
- // equivalent to cloning every path.
- std::vector<FilePath> paths_to_map;
+ // Specifies which basic capabilities to grant to the child process.
+ // By default the child process will receive the caller's complete namespace,
+ // access to the current base::fuchsia::DefaultJob(), handles for stdio and
+ // access to the dynamic library loader.
+ // Note that the child is always provided access to the loader service.
+ uint32_t spawn_flags = FDIO_SPAWN_CLONE_NAMESPACE | FDIO_SPAWN_CLONE_STDIO |
+ FDIO_SPAWN_CLONE_JOB;
+
+ // Specifies paths to clone from the calling process' namespace into that of
+ // the child process. If |paths_to_clone| is empty then the process will
+ // receive either a full copy of the parent's namespace, or an empty one,
+ // depending on whether FDIO_SPAWN_CLONE_NAMESPACE is set.
+ std::vector<FilePath> paths_to_clone;
+
+ // Specifies handles which will be installed as files or directories in the
+ // child process' namespace. Paths installed by |paths_to_clone| will be
+ // overridden by these entries.
+ std::vector<PathToTransfer> paths_to_transfer;
#endif // defined(OS_FUCHSIA)
#if defined(OS_POSIX)
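A minimal sketch of how the new Fuchsia fields fit together, mirroring the
tests later in this patch (|data_dir_handle| is assumed to be a zx::handle,
e.g. from base::fuchsia::GetHandleFromFile(); the child binary path is made
up):

    base::LaunchOptions options;
    options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;  // No implicit namespace clone.
    // Clone the caller's /tmp into the child...
    options.paths_to_clone.push_back(base::FilePath("/tmp"));
    // ...and install |data_dir_handle| as the child's /data, overriding any clone.
    options.paths_to_transfer.push_back(
        {base::FilePath("/data"), data_dir_handle.release()});
    base::Process child = base::LaunchProcess(
        base::CommandLine(base::FilePath("/pkg/bin/child")), options);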
diff --git a/chromium/base/process/launch_fuchsia.cc b/chromium/base/process/launch_fuchsia.cc
index 3bc75809f72..498cc0895c1 100644
--- a/chromium/base/process/launch_fuchsia.cc
+++ b/chromium/base/process/launch_fuchsia.cc
@@ -4,18 +4,19 @@
#include "base/process/launch.h"
-#include <fdio/limits.h>
-#include <fdio/namespace.h>
-#include <fdio/util.h>
-#include <launchpad/launchpad.h>
+#include <lib/fdio/limits.h>
+#include <lib/fdio/namespace.h>
+#include <lib/fdio/spawn.h>
+#include <lib/fdio/util.h>
+#include <lib/zx/job.h>
#include <stdint.h>
#include <unistd.h>
-#include <zircon/process.h>
#include <zircon/processargs.h>
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/fuchsia/default_job.h"
+#include "base/fuchsia/file_utils.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
@@ -62,81 +63,33 @@ bool GetAppOutputInternal(const CommandLine& cmd_line,
return process.WaitForExit(exit_code);
}
-bool MapPathsToLaunchpad(const std::vector<FilePath>& paths_to_map,
- launchpad_t* lp) {
- zx_status_t status;
-
- // Build a array of null terminated strings, which which will be used as an
- // argument for launchpad_set_nametable().
- std::vector<const char*> paths_c_str;
- paths_c_str.reserve(paths_to_map.size());
-
- for (size_t paths_idx = 0; paths_idx < paths_to_map.size(); ++paths_idx) {
- const FilePath& next_path = paths_to_map[paths_idx];
- if (!PathExists(next_path)) {
- DLOG(ERROR) << "Path does not exist: " << next_path;
- return false;
- }
-
- File dir(next_path, File::FLAG_OPEN | File::FLAG_READ);
- ScopedPlatformFile scoped_fd(dir.TakePlatformFile());
- zx_handle_t handles[FDIO_MAX_HANDLES] = {};
- uint32_t types[FDIO_MAX_HANDLES] = {};
- zx_status_t num_handles =
- fdio_transfer_fd(scoped_fd.get(), 0, handles, types);
- // fdio_transfer_fd() returns number of transferred handles, or negative
- // error.
- if (num_handles <= 0) {
- DCHECK_LT(num_handles, 0);
- ZX_LOG(ERROR, num_handles) << "fdio_transfer_fd";
- return false;
- }
- ScopedZxHandle scoped_handle(handles[0]);
- ignore_result(scoped_fd.release());
-
- // Close the handles that we won't use.
- for (int i = 1; i < num_handles; ++i) {
- zx_handle_close(handles[i]);
- }
-
- if (types[0] != PA_FDIO_REMOTE) {
- LOG(ERROR) << "Handle type for " << next_path.AsUTF8Unsafe()
- << " is not PA_FDIO_REMOTE: " << types[0];
- return false;
- }
-
- // Add the handle to the child's nametable.
- // We use the macro PA_HND(..., <index>) to relate the handle to its
- // position in the nametable, which is stored as an array of path strings
- // |paths_str|.
- status = launchpad_add_handle(lp, scoped_handle.release(),
- PA_HND(PA_NS_DIR, paths_idx));
- if (status != ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_add_handle";
- return false;
- }
- paths_c_str.push_back(next_path.value().c_str());
- }
-
- if (!paths_c_str.empty()) {
- status =
- launchpad_set_nametable(lp, paths_c_str.size(), paths_c_str.data());
- if (status != ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_set_nametable";
- return false;
- }
- }
-
- return true;
+fdio_spawn_action_t FdioSpawnAction(uint32_t action) {
+ fdio_spawn_action_t new_action = {};
+ new_action.action = action;
+ return new_action;
}
-struct LaunchpadScopedTraits {
- static launchpad_t* InvalidValue() { return nullptr; }
+fdio_spawn_action_t FdioSpawnActionCloneFd(int local_fd, int target_fd) {
+ fdio_spawn_action_t action = FdioSpawnAction(FDIO_SPAWN_ACTION_CLONE_FD);
+ action.fd.local_fd = local_fd;
+ action.fd.target_fd = target_fd;
+ return action;
+}
- static void Free(launchpad_t* lp) { launchpad_destroy(lp); }
-};
+fdio_spawn_action_t FdioSpawnActionAddNamespaceEntry(const char* prefix,
+ zx_handle_t handle) {
+ fdio_spawn_action_t action = FdioSpawnAction(FDIO_SPAWN_ACTION_ADD_NS_ENTRY);
+ action.ns.prefix = prefix;
+ action.ns.handle = handle;
+ return action;
+}
-using ScopedLaunchpad = ScopedGeneric<launchpad_t*, LaunchpadScopedTraits>;
+fdio_spawn_action_t FdioSpawnActionAddHandle(uint32_t id, zx_handle_t handle) {
+ fdio_spawn_action_t action = FdioSpawnAction(FDIO_SPAWN_ACTION_ADD_HANDLE);
+ action.h.id = id;
+ action.h.handle = handle;
+ return action;
+}
} // namespace
@@ -149,44 +102,41 @@ Process LaunchProcess(const CommandLine& cmdline,
// unprivileged processes by default (no implicit capabilities are granted).
Process LaunchProcess(const std::vector<std::string>& argv,
const LaunchOptions& options) {
- std::vector<const char*> argv_cstr;
- argv_cstr.reserve(argv.size() + 1);
- for (const auto& arg : argv)
- argv_cstr.push_back(arg.c_str());
- argv_cstr.push_back(nullptr);
+ // fdio_spawn_etc() accepts an array of |fdio_spawn_action_t|, describing
+ // namespace entries, descriptors and handles to launch the child process
+ // with.
+ std::vector<fdio_spawn_action_t> spawn_actions;
- // Note that per launchpad.h, the intention is that launchpad_ functions are
- // used in a "builder" style. From launchpad_create() to launchpad_go() the
- // status is tracked in the launchpad_t object, and launchpad_go() reports on
- // the final status, and cleans up |lp| (assuming it was even created).
- zx_handle_t job = options.job_handle != ZX_HANDLE_INVALID ? options.job_handle
- : GetDefaultJob();
- DCHECK_NE(ZX_HANDLE_INVALID, job);
- ScopedLaunchpad lp;
- zx_status_t status;
- if ((status = launchpad_create(job, argv_cstr[0], lp.receive())) != ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_create(job)";
- return Process();
- }
+ // Handles to be transferred to the child are owned by this vector, so that
+  // they are closed on early-exit, and can be release()d otherwise.
+ std::vector<zx::handle> transferred_handles;
- if ((status = launchpad_load_from_file(lp.get(), argv_cstr[0])) != ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_load_from_file(" << argv_cstr[0] << ")";
- return Process();
+ // Add caller-supplied handles for transfer. We must do this first to ensure
+ // that the handles are consumed even if some later step fails.
+ for (const auto& id_and_handle : options.handles_to_transfer) {
+ spawn_actions.push_back(
+ FdioSpawnActionAddHandle(id_and_handle.id, id_and_handle.handle));
+ transferred_handles.emplace_back(id_and_handle.handle);
}
- if ((status = launchpad_set_args(lp.get(), argv.size(), argv_cstr.data())) !=
- ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_set_args";
- return Process();
- }
+ // Determine the job under which to launch the new process.
+ zx::unowned_job job = options.job_handle != ZX_HANDLE_INVALID
+ ? zx::unowned_job(options.job_handle)
+ : GetDefaultJob();
+ DCHECK(job->is_valid());
- uint32_t to_clone = options.clone_flags;
+ // Construct an |argv| array of C-strings from the supplied std::strings.
+ std::vector<const char*> argv_cstr;
+ argv_cstr.reserve(argv.size() + 1);
+ for (const auto& arg : argv)
+ argv_cstr.push_back(arg.c_str());
+ argv_cstr.push_back(nullptr);
- std::unique_ptr<char* []> new_environ;
- char* const empty_environ = nullptr;
- char* const* old_environ = environ;
- if (options.clear_environ)
- old_environ = &empty_environ;
+ // Determine the environment to pass to the new process.
+ // If |clear_environ|, |environ| or |current_directory| are set then we
+ // construct a new (possibly empty) environment, otherwise we let fdio_spawn()
+ // clone the caller's environment into the new process.
+ uint32_t spawn_flags = FDIO_SPAWN_CLONE_LDSVC | options.spawn_flags;
EnvironmentMap environ_modifications = options.environ;
if (!options.current_directory.empty()) {
@@ -197,74 +147,83 @@ Process LaunchProcess(const std::vector<std::string>& argv,
environ_modifications["PWD"] = cwd.value();
}
- if (to_clone & LP_CLONE_DEFAULT_JOB) {
- // Override Fuchsia's built in default job cloning behavior with our own
- // logic which uses |job| instead of zx_job_default().
- // This logic is based on the launchpad implementation.
- zx_handle_t job_duplicate = ZX_HANDLE_INVALID;
- if ((status = zx_handle_duplicate(job, ZX_RIGHT_SAME_RIGHTS,
- &job_duplicate)) != ZX_OK) {
- ZX_LOG(ERROR, status) << "zx_handle_duplicate";
- return Process();
- }
- launchpad_add_handle(lp.get(), job_duplicate, PA_HND(PA_JOB_DEFAULT, 0));
- to_clone &= ~LP_CLONE_DEFAULT_JOB;
- }
-
- if (!environ_modifications.empty())
+ std::unique_ptr<char* []> new_environ;
+ if (!environ_modifications.empty()) {
+ char* const empty_environ = nullptr;
+ char* const* old_environ = options.clear_environ ? &empty_environ : environ;
new_environ = AlterEnvironment(old_environ, environ_modifications);
-
- if (!environ_modifications.empty() || options.clear_environ)
- launchpad_set_environ(lp.get(), new_environ.get());
- else
- to_clone |= LP_CLONE_ENVIRON;
-
- if (!options.paths_to_map.empty()) {
- DCHECK(!(to_clone & LP_CLONE_FDIO_NAMESPACE));
- if (!MapPathsToLaunchpad(options.paths_to_map, lp.get())) {
- return Process();
- }
+ } else if (!options.clear_environ) {
+ spawn_flags |= FDIO_SPAWN_CLONE_ENVIRON;
}
- launchpad_clone(lp.get(), to_clone);
-
- // Clone the mapped file-descriptors, plus any of the stdio descriptors
- // which were not explicitly specified.
- bool stdio_already_mapped[3] = {false};
- for (const auto& src_target : options.fds_to_remap) {
- if (static_cast<size_t>(src_target.second) <
- arraysize(stdio_already_mapped)) {
- stdio_already_mapped[src_target.second] = true;
+ // Add actions to clone handles for any specified paths into the new process'
+ // namespace.
+ std::vector<const char*> mapped_paths_cstr;
+ if (!options.paths_to_clone.empty() || !options.paths_to_transfer.empty()) {
+ DCHECK((options.spawn_flags & FDIO_SPAWN_CLONE_NAMESPACE) == 0);
+ mapped_paths_cstr.reserve(options.paths_to_clone.size() +
+ options.paths_to_transfer.size());
+ transferred_handles.reserve(transferred_handles.size() +
+ options.paths_to_clone.size() +
+ options.paths_to_transfer.size());
+
+ for (const auto& path_to_transfer : options.paths_to_transfer) {
+ zx::handle handle(path_to_transfer.handle);
+ spawn_actions.push_back(FdioSpawnActionAddNamespaceEntry(
+ path_to_transfer.path.value().c_str(), handle.get()));
+ mapped_paths_cstr.push_back(path_to_transfer.path.value().c_str());
+ transferred_handles.push_back(std::move(handle));
}
- launchpad_clone_fd(lp.get(), src_target.first, src_target.second);
- }
- if (to_clone & LP_CLONE_FDIO_STDIO) {
- for (size_t stdio_fd = 0; stdio_fd < arraysize(stdio_already_mapped);
- ++stdio_fd) {
- if (!stdio_already_mapped[stdio_fd])
- launchpad_clone_fd(lp.get(), stdio_fd, stdio_fd);
+
+ for (const auto& path_to_clone : options.paths_to_clone) {
+ zx::handle handle = fuchsia::GetHandleFromFile(
+ base::File(base::FilePath(path_to_clone),
+ base::File::FLAG_OPEN | base::File::FLAG_READ));
+ if (!handle) {
+ LOG(WARNING) << "Could not open handle for path: " << path_to_clone;
+ return base::Process();
+ }
+
+ spawn_actions.push_back(FdioSpawnActionAddNamespaceEntry(
+ path_to_clone.value().c_str(), handle.get()));
+ mapped_paths_cstr.push_back(path_to_clone.value().c_str());
+ transferred_handles.push_back(std::move(handle));
}
- to_clone &= ~LP_CLONE_FDIO_STDIO;
}
- for (const auto& id_and_handle : options.handles_to_transfer) {
- launchpad_add_handle(lp.get(), id_and_handle.handle, id_and_handle.id);
+ // Add any file-descriptors to be cloned into the new process.
+ // Note that if FDIO_SPAWN_CLONE_STDIO is set, then any stdio entries in
+ // |fds_to_remap| will be used in place of the parent process' descriptors.
+ for (const auto& src_target : options.fds_to_remap) {
+ spawn_actions.push_back(
+ FdioSpawnActionCloneFd(src_target.first, src_target.second));
}
- zx_handle_t process_handle;
- const char* errmsg;
- if ((status = launchpad_go(lp.get(), &process_handle, &errmsg)) != ZX_OK) {
- ZX_LOG(ERROR, status) << "launchpad_go failed: " << errmsg;
+ zx::process process_handle;
+  // fdio_spawn_etc() will write a null-terminated string to |error_message| in
+ // case of failure, so we avoid unnecessarily initializing it here.
+ char error_message[FDIO_SPAWN_ERR_MSG_MAX_LENGTH];
+ zx_status_t status = fdio_spawn_etc(
+ job->get(), spawn_flags, argv_cstr[0], argv_cstr.data(),
+ new_environ.get(), spawn_actions.size(), spawn_actions.data(),
+ process_handle.reset_and_get_address(), error_message);
+
+ // fdio_spawn_etc() will close all handles specified in add-handle actions,
+ // regardless of whether it succeeds or fails, so release our copies.
+ for (auto& transferred_handle : transferred_handles)
+ ignore_result(transferred_handle.release());
+
+ if (status != ZX_OK) {
+ ZX_LOG(ERROR, status) << "fdio_spawn: " << error_message;
return Process();
}
- ignore_result(lp.release()); // launchpad_go() took ownership.
- Process process(process_handle);
+ // Wrap the handle into a Process, and wait for it to terminate, if requested.
+ Process process(process_handle.release());
if (options.wait) {
status = zx_object_wait_one(process.Handle(), ZX_TASK_TERMINATED,
ZX_TIME_INFINITE, nullptr);
- DCHECK(status == ZX_OK)
- << "zx_object_wait_one: " << zx_status_get_string(status);
+ ZX_DCHECK(status == ZX_OK, status) << "zx_object_wait_one";
}
return process;
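The FdioSpawnAction*() helpers introduced above simply package fdio_spawn_etc()
actions; on their own they read like this (a sketch; |dir_handle|, |log_fd| and
|channel_handle| are illustrative):

    std::vector<fdio_spawn_action_t> actions;
    // Install a directory handle at /config in the child's namespace.
    actions.push_back(FdioSpawnActionAddNamespaceEntry("/config", dir_handle));
    // Map a local descriptor onto the child's stderr.
    actions.push_back(FdioSpawnActionCloneFd(log_fd, STDERR_FILENO));
    // Transfer a channel endpoint, retrievable via zx_take_startup_handle().
    actions.push_back(
        FdioSpawnActionAddHandle(PA_HND(PA_USER0, 0), channel_handle));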
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index 0ae6820dfd9..451bbab90a8 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -255,12 +255,6 @@ Process LaunchProcess(const string16& cmdline,
startup_info->hStdError = options.stderr_handle;
}
- const bool launch_suspended =
- options.job_handle || options.grant_foreground_privilege;
-
- if (launch_suspended)
- flags |= CREATE_SUSPENDED;
-
if (options.job_handle) {
// If this code is run under a debugger, the launched process is
// automatically associated with a job object created by the debugger.
@@ -280,6 +274,10 @@ Process LaunchProcess(const string16& cmdline,
: options.current_directory.value().c_str();
string16 writable_cmdline(cmdline);
+ DCHECK(!(flags & CREATE_SUSPENDED))
+ << "Creating a suspended process can lead to hung processes if the "
+      << "launching process is killed before it assigns the process to the "
+ << "job. https://crbug.com/820996";
if (options.as_user) {
flags |= CREATE_UNICODE_ENVIRONMENT;
void* enviroment_block = nullptr;
@@ -324,9 +322,6 @@ Process LaunchProcess(const string16& cmdline,
DPLOG(ERROR) << "Failed to grant foreground privilege to launched process";
}
- if (launch_suspended)
- ResumeThread(process_info.thread_handle());
-
if (options.wait)
WaitForSingleObject(process_info.process_handle(), INFINITE);
diff --git a/chromium/base/process/memory_linux.cc b/chromium/base/process/memory_linux.cc
index 21b20696aca..171753c71ea 100644
--- a/chromium/base/process/memory_linux.cc
+++ b/chromium/base/process/memory_linux.cc
@@ -18,8 +18,8 @@
#include "build/build_config.h"
#if defined(USE_TCMALLOC)
-#include "third_party/tcmalloc/chromium/src/config.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/config.h"
+#include "third_party/tcmalloc/gperftools-2.0/chromium/src/gperftools/tcmalloc.h"
#endif
namespace base {
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index c06998edca7..674cea3d027 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -16,7 +16,7 @@
#endif
#if defined(OS_FUCHSIA)
-#include "base/fuchsia/scoped_zx_handle.h"
+#include <lib/zx/process.h>
#endif
#if defined(OS_MACOSX)
@@ -188,7 +188,7 @@ class BASE_EXPORT Process {
#if defined(OS_WIN)
win::ScopedHandle process_;
#elif defined(OS_FUCHSIA)
- ScopedZxHandle process_;
+ zx::process process_;
#else
ProcessHandle process_;
#endif
diff --git a/chromium/base/process/process_fuchsia.cc b/chromium/base/process/process_fuchsia.cc
index 94bce344a10..ea6fea9434c 100644
--- a/chromium/base/process/process_fuchsia.cc
+++ b/chromium/base/process/process_fuchsia.cc
@@ -4,11 +4,13 @@
#include "base/process/process.h"
+#include <lib/zx/process.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include "base/debug/activity_tracker.h"
#include "base/fuchsia/default_job.h"
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/strings/stringprintf.h"
namespace base {
@@ -48,15 +50,15 @@ Process Process::Open(ProcessId pid) {
return Current();
// While a process with object id |pid| might exist, the job returned by
- // zx_job_default() might not contain it, so this call can fail.
- ScopedZxHandle handle;
- zx_status_t status = zx_object_get_child(
- GetDefaultJob(), pid, ZX_RIGHT_SAME_RIGHTS, handle.receive());
+ // zx::job::default_job() might not contain it, so this call can fail.
+ zx::process process;
+ zx_status_t status =
+ GetDefaultJob()->get_child(pid, ZX_RIGHT_SAME_RIGHTS, &process);
if (status != ZX_OK) {
- DLOG(ERROR) << "zx_object_get_child failed: " << status;
+ ZX_DLOG(ERROR, status) << "zx_object_get_child";
return Process();
}
- return Process(handle.release());
+ return Process(process.release());
}
// static
@@ -68,10 +70,11 @@ Process Process::OpenWithExtraPrivileges(ProcessId pid) {
// static
Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
DCHECK_NE(handle, GetCurrentProcessHandle());
- ScopedZxHandle out;
- if (zx_handle_duplicate(handle, ZX_RIGHT_SAME_RIGHTS, out.receive()) !=
- ZX_OK) {
- DLOG(ERROR) << "zx_handle_duplicate failed: " << handle;
+ zx::process out;
+ zx_status_t result =
+ zx::unowned_process(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &out);
+ if (result != ZX_OK) {
+ ZX_DLOG(ERROR, result) << "zx_handle_duplicate(from_handle)";
return Process();
}
@@ -103,10 +106,10 @@ Process Process::Duplicate() const {
if (!IsValid())
return Process();
- ScopedZxHandle out;
- if (zx_handle_duplicate(process_.get(), ZX_RIGHT_SAME_RIGHTS,
- out.receive()) != ZX_OK) {
- DLOG(ERROR) << "zx_handle_duplicate failed: " << process_.get();
+ zx::process out;
+ zx_status_t result = process_.duplicate(ZX_RIGHT_SAME_RIGHTS, &out);
+ if (result != ZX_OK) {
+ ZX_DLOG(ERROR, result) << "zx_handle_duplicate";
return Process();
}
@@ -130,19 +133,17 @@ void Process::Close() {
bool Process::Terminate(int exit_code, bool wait) const {
// exit_code isn't supportable. https://crbug.com/753490.
zx_status_t status = zx_task_kill(Handle());
- // TODO(scottmg): Put these LOG/CHECK back to DLOG/DCHECK after
- // https://crbug.com/750756 is diagnosed.
if (status == ZX_OK && wait) {
zx_signals_t signals;
status = zx_object_wait_one(Handle(), ZX_TASK_TERMINATED,
zx_deadline_after(ZX_SEC(60)), &signals);
if (status != ZX_OK) {
- LOG(ERROR) << "Error waiting for process exit: " << status;
+ ZX_DLOG(ERROR, status) << "zx_object_wait_one(terminate)";
} else {
CHECK(signals & ZX_TASK_TERMINATED);
}
} else if (status != ZX_OK) {
- LOG(ERROR) << "Unable to terminate process: " << status;
+ ZX_DLOG(ERROR, status) << "zx_task_kill";
}
return status >= 0;
@@ -162,28 +163,11 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
zx_time_t deadline = timeout == TimeDelta::Max()
? ZX_TIME_INFINITE
: (TimeTicks::Now() + timeout).ToZxTime();
- // TODO(scottmg): https://crbug.com/755282
- const bool kOnBot = getenv("CHROME_HEADLESS") != nullptr;
- if (kOnBot) {
- LOG(ERROR) << base::StringPrintf(
- "going to wait for process %x (deadline=%zu, now=%zu)", process_.get(),
- deadline, TimeTicks::Now().ToZxTime());
- }
zx_signals_t signals_observed = 0;
zx_status_t status = zx_object_wait_one(process_.get(), ZX_TASK_TERMINATED,
deadline, &signals_observed);
-
- // TODO(scottmg): Make these LOGs into DLOGs after https://crbug.com/750756 is
- // fixed.
- if (status != ZX_OK && status != ZX_ERR_TIMED_OUT) {
- LOG(ERROR) << "zx_object_wait_one failed, status=" << status;
- return false;
- }
- if (status == ZX_ERR_TIMED_OUT) {
- zx_time_t now = TimeTicks::Now().ToZxTime();
- LOG(ERROR) << "zx_object_wait_one timed out, signals=" << signals_observed
- << ", deadline=" << deadline << ", now=" << now
- << ", delta=" << (now - deadline);
+ if (status != ZX_OK) {
+ ZX_DLOG(ERROR, status) << "zx_object_wait_one";
return false;
}
@@ -191,7 +175,7 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
status = zx_object_get_info(process_.get(), ZX_INFO_PROCESS, &proc_info,
sizeof(proc_info), nullptr, nullptr);
if (status != ZX_OK) {
- LOG(ERROR) << "zx_object_get_info failed, status=" << status;
+ ZX_DLOG(ERROR, status) << "zx_object_get_info";
if (exit_code)
*exit_code = -1;
return false;
diff --git a/chromium/base/process/process_info.h b/chromium/base/process/process_info.h
index 5138e2475a9..f06370e5bf7 100644
--- a/chromium/base/process/process_info.h
+++ b/chromium/base/process/process_info.h
@@ -23,6 +23,7 @@ class BASE_EXPORT CurrentProcessInfo {
#if defined(OS_WIN)
enum IntegrityLevel {
INTEGRITY_UNKNOWN,
+ UNTRUSTED_INTEGRITY,
LOW_INTEGRITY,
MEDIUM_INTEGRITY,
HIGH_INTEGRITY,
diff --git a/chromium/base/process/process_info_win.cc b/chromium/base/process/process_info_win.cc
index 23e93e335eb..cb359e78ffb 100644
--- a/chromium/base/process/process_info_win.cc
+++ b/chromium/base/process/process_info_win.cc
@@ -45,6 +45,7 @@ IntegrityLevel GetCurrentProcessIntegrityLevel() {
if (::GetTokenInformation(process_token, TokenIntegrityLevel, nullptr, 0,
&token_info_length) ||
::GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ NOTREACHED();
return INTEGRITY_UNKNOWN;
}
@@ -53,6 +54,7 @@ IntegrityLevel GetCurrentProcessIntegrityLevel() {
reinterpret_cast<TOKEN_MANDATORY_LABEL*>(token_label_bytes.get());
if (!::GetTokenInformation(process_token, TokenIntegrityLevel, token_label,
token_info_length, &token_info_length)) {
+ NOTREACHED();
return INTEGRITY_UNKNOWN;
}
@@ -61,6 +63,9 @@ IntegrityLevel GetCurrentProcessIntegrityLevel() {
static_cast<DWORD>(*::GetSidSubAuthorityCount(token_label->Label.Sid) -
1));
+ if (integrity_level < SECURITY_MANDATORY_LOW_RID)
+ return UNTRUSTED_INTEGRITY;
+
if (integrity_level < SECURITY_MANDATORY_MEDIUM_RID)
return LOW_INTEGRITY;
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 0170a0c2db9..6bfd93ece74 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -267,12 +267,12 @@ BASE_EXPORT size_t GetPageSize();
// at once. If the number is unavailable, a conservative best guess is returned.
BASE_EXPORT size_t GetMaxFds();
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
// Increases the file descriptor soft limit to |max_descriptors| or the OS hard
// limit, whichever is lower. If the limit is already higher than
// |max_descriptors|, then nothing happens.
BASE_EXPORT void IncreaseFdLimitTo(unsigned int max_descriptors);
-#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX)
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
defined(OS_ANDROID) || defined(OS_AIX) || defined(OS_FUCHSIA)
diff --git a/chromium/base/process/process_metrics_fuchsia.cc b/chromium/base/process/process_metrics_fuchsia.cc
index a34dff73150..417f7ccff5d 100644
--- a/chromium/base/process/process_metrics_fuchsia.cc
+++ b/chromium/base/process/process_metrics_fuchsia.cc
@@ -4,7 +4,7 @@
#include "base/process/process_metrics.h"
-#include <fdio/limits.h>
+#include <lib/fdio/limits.h>
namespace base {
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 7645b78b19b..08a918c1a8f 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -201,11 +201,8 @@ bool WaitForExitWithTimeoutImpl(base::ProcessHandle handle,
}
int status;
- if (!WaitpidWithTimeout(handle, &status, timeout)) {
- // If multiple threads wait on the same |handle| then one wait will succeed
- // and the other will fail with errno set to ECHILD.
- return exited || (errno == ECHILD);
- }
+ if (!WaitpidWithTimeout(handle, &status, timeout))
+ return exited;
if (WIFSIGNALED(status)) {
if (exit_code)
*exit_code = -1;
@@ -342,7 +339,7 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedProcessWaitActivity process_activity(this);
- int local_exit_code;
+ int local_exit_code = 0;
bool exited = WaitForExitWithTimeoutImpl(Handle(), &local_exit_code, timeout);
if (exited) {
Exited(local_exit_code);
diff --git a/chromium/base/process/process_unittest.cc b/chromium/base/process/process_unittest.cc
index 9f678d141a1..219944df7a6 100644
--- a/chromium/base/process/process_unittest.cc
+++ b/chromium/base/process/process_unittest.cc
@@ -147,7 +147,7 @@ TEST_F(ProcessTest, Terminate) {
EXPECT_NE(TERMINATION_STATUS_STILL_RUNNING,
GetTerminationStatus(process.Handle(), &exit_code));
#if !defined(OS_POSIX) && !defined(OS_FUCHSIA)
- // The POSIX implementation actually ignores the exit_code.
+ // The POSIX & Fuchsia implementations actually ignore the exit_code.
EXPECT_EQ(kExpectedExitCode, exit_code);
#endif
}
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 89466695fcf..4e788b78a8e 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -45,17 +45,19 @@
#if defined(OS_POSIX)
#include <sys/resource.h>
#endif
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
#include <dlfcn.h>
#include <errno.h>
-#include <fcntl.h>
#include <sched.h>
#include <signal.h>
-#include <sys/socket.h>
-#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#endif
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#endif
#if defined(OS_WIN)
#include <windows.h>
#endif
@@ -67,11 +69,14 @@
#include "third_party/lss/linux_syscall_support.h"
#endif
#if defined(OS_FUCHSIA)
-#include <fdio/limits.h>
+#include <lib/fdio/limits.h>
#include <zircon/process.h>
#include <zircon/processargs.h>
#include <zircon/syscalls.h>
#include "base/base_paths_fuchsia.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/fuchsia/file_utils.h"
+#include "base/fuchsia/fuchsia_logging.h"
#endif
namespace base {
@@ -82,12 +87,15 @@ const char kSignalFileSlow[] = "SlowChildProcess.die";
const char kSignalFileKill[] = "KilledChildProcess.die";
const char kTestHelper[] = "test_child_process";
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
const char kSignalFileTerm[] = "TerminatedChildProcess.die";
#endif
#if defined(OS_FUCHSIA)
const char kSignalFileClone[] = "ClonedTmpDir.die";
+const char kDataDirHasStaged[] = "DataDirHasStaged.die";
+const char kFooDirHasStaged[] = "FooDirHasStaged.die";
+const char kFooDirDoesNotHaveStaged[] = "FooDirDoesNotHaveStaged.die";
#endif
#if defined(OS_WIN)
@@ -223,6 +231,100 @@ TEST_F(ProcessUtilTest, DISABLED_GetTerminationStatusExit) {
#if defined(OS_FUCHSIA)
+MULTIPROCESS_TEST_MAIN(CheckDataDirHasStaged) {
+ if (!PathExists(base::FilePath("/data/staged"))) {
+ return 1;
+ }
+ WaitToDie(ProcessUtilTest::GetSignalFilePath(kDataDirHasStaged).c_str());
+ return kSuccess;
+}
+
+// Test that transferred paths override cloned paths.
+TEST_F(ProcessUtilTest, HandleTransfersOverrideClones) {
+ const std::string signal_file =
+ ProcessUtilTest::GetSignalFilePath(kDataDirHasStaged);
+ remove(signal_file.c_str());
+
+ // Create a tempdir with "staged" as its contents.
+ ScopedTempDir tmpdir_with_staged;
+ ASSERT_TRUE(tmpdir_with_staged.CreateUniqueTempDir());
+ {
+ base::FilePath staged_file_path =
+ tmpdir_with_staged.GetPath().Append("staged");
+ base::File staged_file(staged_file_path,
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(staged_file.created());
+ staged_file.Close();
+ }
+
+ base::LaunchOptions options;
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
+
+ // Attach the tempdir to "data", but also try to duplicate the existing "data"
+ // directory.
+ options.paths_to_clone.push_back(base::FilePath("/data"));
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.paths_to_transfer.push_back(
+ {FilePath("/data"),
+ fuchsia::GetHandleFromFile(
+ base::File(base::FilePath(tmpdir_with_staged.GetPath()),
+ base::File::FLAG_OPEN | base::File::FLAG_READ))
+ .release()});
+
+  // Verify that "/data/staged" exists from the child process' perspective.
+ Process process(SpawnChildWithOptions("CheckDataDirHasStaged", options));
+ ASSERT_TRUE(process.IsValid());
+ SignalChildren(signal_file.c_str());
+
+ int exit_code = 42;
+ EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_EQ(kSuccess, exit_code);
+}
+
+MULTIPROCESS_TEST_MAIN(CheckMountedDir) {
+ if (!PathExists(base::FilePath("/foo/staged"))) {
+ return 1;
+ }
+ WaitToDie(ProcessUtilTest::GetSignalFilePath(kFooDirHasStaged).c_str());
+ return kSuccess;
+}
+
+// Test that we can install an opaque handle in the child process' namespace.
+TEST_F(ProcessUtilTest, TransferHandleToPath) {
+ const std::string signal_file =
+ ProcessUtilTest::GetSignalFilePath(kFooDirHasStaged);
+ remove(signal_file.c_str());
+
+ // Create a tempdir with "staged" as its contents.
+ ScopedTempDir new_tmpdir;
+ ASSERT_TRUE(new_tmpdir.CreateUniqueTempDir());
+ base::FilePath staged_file_path = new_tmpdir.GetPath().Append("staged");
+ base::File staged_file(staged_file_path,
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(staged_file.created());
+ staged_file.Close();
+
+ // Mount the tempdir to "/foo".
+ zx::handle tmp_handle = fuchsia::GetHandleFromFile(
+ base::File(base::FilePath(new_tmpdir.GetPath()),
+ base::File::FLAG_OPEN | base::File::FLAG_READ));
+ ASSERT_TRUE(tmp_handle.is_valid());
+ LaunchOptions options;
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.paths_to_transfer.push_back(
+ {base::FilePath("/foo"), tmp_handle.release()});
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
+
+  // Verify that "/foo/staged" exists from the child process' perspective.
+ Process process(SpawnChildWithOptions("CheckMountedDir", options));
+ ASSERT_TRUE(process.IsValid());
+ SignalChildren(signal_file.c_str());
+
+ int exit_code = 42;
+ EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_EQ(kSuccess, exit_code);
+}
+
MULTIPROCESS_TEST_MAIN(CheckTmpFileExists) {
// Look through the filesystem to ensure that no other directories
// besides "tmp" are in the namespace.
@@ -241,14 +343,14 @@ MULTIPROCESS_TEST_MAIN(CheckTmpFileExists) {
return kSuccess;
}
-TEST_F(ProcessUtilTest, SelectivelyClonedDir) {
+TEST_F(ProcessUtilTest, CloneTmp) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileClone);
remove(signal_file.c_str());
LaunchOptions options;
- options.paths_to_map.push_back(base::FilePath("/tmp"));
- options.clone_flags = LP_CLONE_FDIO_STDIO;
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
Process process(SpawnChildWithOptions("CheckTmpFileExists", options));
ASSERT_TRUE(process.IsValid());
@@ -260,6 +362,45 @@ TEST_F(ProcessUtilTest, SelectivelyClonedDir) {
EXPECT_EQ(kSuccess, exit_code);
}
+MULTIPROCESS_TEST_MAIN(CheckMountedDirDoesNotExist) {
+ if (PathExists(base::FilePath("/foo"))) {
+ return 1;
+ }
+ WaitToDie(
+ ProcessUtilTest::GetSignalFilePath(kFooDirDoesNotHaveStaged).c_str());
+ return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, TransferInvalidHandleFails) {
+ LaunchOptions options;
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.paths_to_transfer.push_back(
+ {base::FilePath("/foo"), ZX_HANDLE_INVALID});
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
+
+ // Verify that the process is never constructed.
+ const std::string signal_file =
+ ProcessUtilTest::GetSignalFilePath(kFooDirDoesNotHaveStaged);
+ remove(signal_file.c_str());
+ Process process(
+ SpawnChildWithOptions("CheckMountedDirDoesNotExist", options));
+ ASSERT_FALSE(process.IsValid());
+}
+
+TEST_F(ProcessUtilTest, CloneInvalidDirFails) {
+ const std::string signal_file =
+ ProcessUtilTest::GetSignalFilePath(kSignalFileClone);
+ remove(signal_file.c_str());
+
+ LaunchOptions options;
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.paths_to_clone.push_back(base::FilePath("/definitely_not_a_dir"));
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
+
+ Process process(SpawnChildWithOptions("CheckTmpFileExists", options));
+ ASSERT_FALSE(process.IsValid());
+}
+
// Test that we can clone other directories. CheckTmpFileExists will return an
// error code if it detects a directory other than "/tmp", so we can use that as
// a signal that it successfully detected another entry in the root namespace.
@@ -269,9 +410,9 @@ TEST_F(ProcessUtilTest, CloneAlternateDir) {
remove(signal_file.c_str());
LaunchOptions options;
- options.paths_to_map.push_back(base::FilePath("/tmp"));
- options.paths_to_map.push_back(base::FilePath("/data"));
- options.clone_flags = LP_CLONE_FDIO_STDIO;
+ options.paths_to_clone.push_back(base::FilePath("/tmp"));
+ options.paths_to_clone.push_back(base::FilePath("/data"));
+ options.spawn_flags = FDIO_SPAWN_CLONE_STDIO;
Process process(SpawnChildWithOptions("CheckTmpFileExists", options));
ASSERT_TRUE(process.IsValid());
@@ -283,6 +424,55 @@ TEST_F(ProcessUtilTest, CloneAlternateDir) {
EXPECT_EQ(1, exit_code);
}
+TEST_F(ProcessUtilTest, HandlesToTransferClosedOnSpawnFailure) {
+ zx::handle handles[2];
+ zx_status_t result = zx_channel_create(0, handles[0].reset_and_get_address(),
+ handles[1].reset_and_get_address());
+ ZX_CHECK(ZX_OK == result, result) << "zx_channel_create";
+
+ LaunchOptions options;
+ options.handles_to_transfer.push_back({0, handles[0].get()});
+
+ // Launch a non-existent binary, causing fdio_spawn() to fail.
+ CommandLine command_line(FilePath(
+ FILE_PATH_LITERAL("💩magical_filename_that_will_never_exist_ever")));
+ Process process(LaunchProcess(command_line, options));
+ ASSERT_FALSE(process.IsValid());
+
+ // If LaunchProcess did its job then handles[0] is no longer valid, and
+ // handles[1] should observe a channel-closed signal.
+ EXPECT_EQ(
+ zx_object_wait_one(handles[1].get(), ZX_CHANNEL_PEER_CLOSED, 0, nullptr),
+ ZX_OK);
+ EXPECT_EQ(ZX_ERR_BAD_HANDLE, zx_handle_close(handles[0].get()));
+ ignore_result(handles[0].release());
+}
+
+TEST_F(ProcessUtilTest, HandlesToTransferClosedOnBadPathToMapFailure) {
+ zx::handle handles[2];
+ zx_status_t result = zx_channel_create(0, handles[0].reset_and_get_address(),
+ handles[1].reset_and_get_address());
+ ZX_CHECK(ZX_OK == result, result) << "zx_channel_create";
+
+ LaunchOptions options;
+ options.handles_to_transfer.push_back({0, handles[0].get()});
+ options.spawn_flags = options.spawn_flags & ~FDIO_SPAWN_CLONE_NAMESPACE;
+ options.paths_to_clone.emplace_back(
+ "💩magical_path_that_will_never_exist_ever");
+
+  // LaunchProcess should fail to open() the path to clone, and fail before
+ // fdio_spawn().
+ Process process(LaunchProcess(CommandLine(FilePath()), options));
+ ASSERT_FALSE(process.IsValid());
+
+ // If LaunchProcess did its job then handles[0] is no longer valid, and
+ // handles[1] should observe a channel-closed signal.
+ EXPECT_EQ(
+ zx_object_wait_one(handles[1].get(), ZX_CHANNEL_PEER_CLOSED, 0, nullptr),
+ ZX_OK);
+ EXPECT_EQ(ZX_ERR_BAD_HANDLE, zx_handle_close(handles[0].get()));
+ ignore_result(handles[0].release());
+}
#endif // defined(OS_FUCHSIA)
// On Android SpawnProcess() doesn't use LaunchProcess() and doesn't support
@@ -351,7 +541,7 @@ const char kSignalFileCrash[] = "CrashingChildProcess.die";
MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileCrash).c_str());
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
// Have to disable to signal handler for segv so we can get a crash
// instead of an abnormal termination through the crash dump handler.
::signal(SIGSEGV, SIG_DFL);
@@ -364,7 +554,9 @@ MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
// This test intentionally crashes, so we don't need to run it under
// AddressSanitizer.
-#if defined(ADDRESS_SANITIZER)
+#if defined(ADDRESS_SANITIZER) || defined(OS_FUCHSIA)
+// TODO(crbug.com/753490): Access to the process termination reason is not
+// implemented in Fuchsia.
#define MAYBE_GetTerminationStatusCrash DISABLED_GetTerminationStatusCrash
#else
#define MAYBE_GetTerminationStatusCrash GetTerminationStatusCrash
@@ -389,7 +581,7 @@ TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
#if defined(OS_WIN)
EXPECT_EQ(static_cast<int>(0xc0000005), exit_code);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
int signaled = WIFSIGNALED(exit_code);
EXPECT_NE(0, signaled);
int signal = WTERMSIG(exit_code);
@@ -408,14 +600,16 @@ MULTIPROCESS_TEST_MAIN(KilledChildProcess) {
// Kill ourselves.
HANDLE handle = ::OpenProcess(PROCESS_ALL_ACCESS, 0, ::GetCurrentProcessId());
::TerminateProcess(handle, kExpectedKilledExitCode);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
// Send a SIGKILL to this process, just like the OOM killer would.
::kill(getpid(), SIGKILL);
+#elif defined(OS_FUCHSIA)
+ zx_task_kill(zx_process_self());
#endif
return 1;
}
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
MULTIPROCESS_TEST_MAIN(TerminatedChildProcess) {
WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileTerm).c_str());
// Send a SIGTERM to this process.
@@ -424,7 +618,14 @@ MULTIPROCESS_TEST_MAIN(TerminatedChildProcess) {
}
#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
-TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/753490): Access to the process termination reason is not
+// implemented in Fuchsia.
+#define MAYBE_GetTerminationStatusSigKill DISABLED_GetTerminationStatusSigKill
+#else
+#define MAYBE_GetTerminationStatusSigKill GetTerminationStatusSigKill
+#endif
+TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusSigKill) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileKill);
remove(signal_file.c_str());
@@ -448,7 +649,7 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
#if defined(OS_WIN)
EXPECT_EQ(kExpectedKilledExitCode, exit_code);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif defined(OS_POSIX)
int signaled = WIFSIGNALED(exit_code);
EXPECT_NE(0, signaled);
int signal = WTERMSIG(exit_code);
@@ -457,7 +658,10 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
remove(signal_file.c_str());
}
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
+// TODO(crbug.com/753490): Access to the process termination reason is not
+// implemented in Fuchsia. Unix signals are not implemented in Fuchsia so this
+// test might not be relevant anyway.
TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileTerm);
@@ -482,7 +686,7 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
EXPECT_EQ(SIGTERM, signal);
remove(signal_file.c_str());
}
-#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
+#endif // defined(OS_POSIX)
TEST_F(ProcessUtilTest, EnsureTerminationUndying) {
test::ScopedTaskEnvironment task_environment;
@@ -492,9 +696,22 @@ TEST_F(ProcessUtilTest, EnsureTerminationUndying) {
EnsureProcessTerminated(child_process.Duplicate());
+#if defined(OS_POSIX)
+ errno = 0;
+#endif // defined(OS_POSIX)
+
// Allow a generous timeout, to cope with slow/loaded test bots.
- EXPECT_TRUE(child_process.WaitForExitWithTimeout(
- TestTimeouts::action_max_timeout(), nullptr));
+ bool did_exit = child_process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), nullptr);
+
+#if defined(OS_POSIX)
+ // Both EnsureProcessTerminated() and WaitForExitWithTimeout() will call
+ // waitpid(). One will succeed, and the other will fail with ECHILD. If our
+  // wait failed then check for ECHILD and assume the process exited.
+ did_exit = did_exit || (errno == ECHILD);
+#endif // defined(OS_POSIX)
+
+ EXPECT_TRUE(did_exit);
}
MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
@@ -880,7 +1097,7 @@ TEST_F(ProcessUtilTest, FDRemappingIncludesStdio) {
const uint16_t kStartupHandleId = 43;
MULTIPROCESS_TEST_MAIN(ProcessUtilsVerifyHandle) {
zx_handle_t handle =
- zx_get_startup_handle(PA_HND(PA_USER0, kStartupHandleId));
+ zx_take_startup_handle(PA_HND(PA_USER0, kStartupHandleId));
CHECK_NE(ZX_HANDLE_INVALID, handle);
// Write to the pipe so the parent process can observe output.
diff --git a/chromium/base/profiler/native_stack_sampler.h b/chromium/base/profiler/native_stack_sampler.h
index ebd7c3c4b81..5d7e9b07715 100644
--- a/chromium/base/profiler/native_stack_sampler.h
+++ b/chromium/base/profiler/native_stack_sampler.h
@@ -17,8 +17,8 @@ namespace base {
class NativeStackSamplerTestDelegate;
// NativeStackSampler is an implementation detail of StackSamplingProfiler. It
-// abstracts the native implementation required to record a stack sample for a
-// given thread.
+// abstracts the native implementation required to record a set of stack frames
+// for a given thread.
class NativeStackSampler {
public:
// This class contains a buffer for stack copies that can be shared across
@@ -41,22 +41,12 @@ class NativeStackSampler {
DISALLOW_COPY_AND_ASSIGN(StackBuffer);
};
- // The callback type used to add annotations to a sample during collection.
- // This is passed to the native sampler to be applied at the most appropriate
- // time. It is a simple function-pointer because the generated code must be
- // completely predictable and do nothing that could acquire a mutex; a
- // Callback object is code outside the control of this object and could,
- // for example, acquire a mutex as part of allocating memory for a LOG
- // message.
- using AnnotateCallback = void (*)(StackSamplingProfiler::Sample*);
-
virtual ~NativeStackSampler();
- // Creates a stack sampler that records samples for |thread_handle|. Returns
- // null if this platform does not support stack sampling.
+ // Creates a stack sampler that records samples for thread with |thread_id|.
+ // Returns null if this platform does not support stack sampling.
static std::unique_ptr<NativeStackSampler> Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
// Gets the required size of the stack buffer.
@@ -69,18 +59,13 @@ class NativeStackSampler {
// The following functions are all called on the SamplingThread (not the
// thread being sampled).
- // Notifies the sampler that we're starting to record a new profile. Modules
- // shared across samples in the profile should be recorded in |modules|.
- virtual void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) = 0;
-
- // Records a stack sample to |sample|.
- virtual void RecordStackSample(StackBuffer* stackbuffer,
- StackSamplingProfiler::Sample* sample) = 0;
+ // Notifies the sampler that we're starting to record a new profile.
+ virtual void ProfileRecordingStarting() = 0;
- // Notifies the sampler that we've stopped recording the current
- // profile.
- virtual void ProfileRecordingStopped(StackBuffer* stackbuffer) = 0;
+ // Records a set of internal frames and returns them.
+ virtual std::vector<StackSamplingProfiler::InternalFrame> RecordStackFrames(
+ StackBuffer* stackbuffer,
+ StackSamplingProfiler::ProfileBuilder* profile_builder) = 0;
protected:
NativeStackSampler();
diff --git a/chromium/base/profiler/native_stack_sampler_mac.cc b/chromium/base/profiler/native_stack_sampler_mac.cc
index a161173f060..d45c7a8c836 100644
--- a/chromium/base/profiler/native_stack_sampler_mac.cc
+++ b/chromium/base/profiler/native_stack_sampler_mac.cc
@@ -19,7 +19,6 @@
#include <sys/syslimits.h>
#include <algorithm>
-#include <map>
#include <memory>
#include "base/logging.h"
@@ -34,19 +33,32 @@ void _sigtramp(int, int, struct sigset*);
namespace base {
+using Frame = StackSamplingProfiler::Frame;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
+using ProfileBuilder = StackSamplingProfiler::ProfileBuilder;
+
namespace {
-// Maps a module's address range (half-open) in memory to an index in a separate
-// data structure.
-struct ModuleIndex {
- ModuleIndex(uintptr_t start, uintptr_t end, size_t idx)
- : base_address(start), end_address(end), index(idx){};
+// ModuleCacheEntry records a module's address range (half-open) in memory and
+// the module itself.
+struct ModuleCacheEntry {
+ ModuleCacheEntry(uintptr_t start,
+ uintptr_t end,
+ InternalModule internal_module)
+ : base_address(start),
+ end_address(end),
+ internal_module(std::move(internal_module)){};
+
// Base address of the represented module.
uintptr_t base_address;
+
// First address off the end of the represented module.
uintptr_t end_address;
- // An index to the represented module in a separate container.
- size_t index;
+
+ // Module information.
+ InternalModule internal_module;
};
// Module identifiers ---------------------------------------------------------
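ModuleCacheEntry replaces the old index-based ModuleIndex: the cache now stores the module itself alongside its half-open [base_address, end_address) range. Below is a small self-contained sketch (with a hypothetical Module type standing in for InternalModule) of the linear range lookup the Mac sampler performs before falling back to dladdr().

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct Module { std::string id; };  // Hypothetical stand-in for InternalModule.

struct ModuleCacheEntry {
  uintptr_t base_address;  // Start of the module's text (inclusive).
  uintptr_t end_address;   // One past the end of the module's text (exclusive).
  Module module;
};

// Returns the cached module containing |ip|, or nullptr on a cache miss.
const Module* FindCachedModule(const std::vector<ModuleCacheEntry>& cache,
                               uintptr_t ip) {
  auto it = std::find_if(cache.begin(), cache.end(),
                         [ip](const ModuleCacheEntry& e) {
                           return ip >= e.base_address && ip < e.end_address;
                         });
  return it != cache.end() ? &it->module : nullptr;
}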
@@ -66,9 +78,11 @@ std::string GetUniqueId(const void* module_addr) {
size_t offset = sizeof(mach_header_64);
size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds;
- for (uint32_t i = 0; (i < mach_header->ncmds) &&
- (offset + sizeof(load_command) < offset_limit);
- ++i) {
+
+ for (uint32_t i = 0; i < mach_header->ncmds; ++i) {
+ if (offset + sizeof(load_command) >= offset_limit)
+ return std::string();
+
const load_command* current_cmd = reinterpret_cast<const load_command*>(
reinterpret_cast<const uint8_t*>(mach_header) + offset);
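The rewritten loop above now bails out (GetUniqueId() returns an empty id) instead of silently stopping when the declared load-command area would be overrun. As a point of reference, here is a simplified stand-alone sketch of the same bounded iteration over 64-bit Mach-O load commands, extracting LC_UUID; it is macOS-only and is an illustration of the pattern, not the function from this diff.

#include <mach-o/loader.h>
#include <uuid/uuid.h>
#include <cstdint>
#include <string>

// Returns the LC_UUID of a 64-bit Mach-O image as a string, or "" if none is
// found or the load commands would run past the declared sizeofcmds.
std::string GetImageUuid(const mach_header_64* header) {
  const auto* commands = reinterpret_cast<const unsigned char*>(header + 1);
  size_t offset = 0;
  const size_t limit = header->sizeofcmds;

  for (uint32_t i = 0; i < header->ncmds; ++i) {
    // Bail on a malformed header rather than reading past the command area.
    if (offset + sizeof(load_command) > limit)
      return std::string();
    const auto* cmd = reinterpret_cast<const load_command*>(commands + offset);
    if (offset + cmd->cmdsize > limit)
      return std::string();

    if (cmd->cmd == LC_UUID) {
      const auto* uuid_cmd = reinterpret_cast<const uuid_command*>(cmd);
      uuid_string_t text;
      uuid_unparse(uuid_cmd->uuid, text);
      return std::string(text);
    }
    offset += cmd->cmdsize;
  }
  return std::string();
}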
@@ -108,41 +122,6 @@ size_t GetModuleTextSize(const void* module_addr) {
return module_size;
}
-// Gets the index for the Module containing |instruction_pointer| in
-// |modules|, adding it if it's not already present. Returns
-// StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
-// determined for |module|.
-size_t GetModuleIndex(const uintptr_t instruction_pointer,
- std::vector<StackSamplingProfiler::Module>* modules,
- std::vector<ModuleIndex>* profile_module_index) {
- // Check if |instruction_pointer| is in the address range of a module we've
- // already seen.
- auto module_index =
- std::find_if(profile_module_index->begin(), profile_module_index->end(),
- [instruction_pointer](const ModuleIndex& index) {
- return instruction_pointer >= index.base_address &&
- instruction_pointer < index.end_address;
- });
- if (module_index != profile_module_index->end()) {
- return module_index->index;
- }
- Dl_info inf;
- if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf))
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
-
- StackSamplingProfiler::Module module(
- reinterpret_cast<uintptr_t>(inf.dli_fbase), GetUniqueId(inf.dli_fbase),
- base::FilePath(inf.dli_fname));
- modules->push_back(module);
-
- uintptr_t base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
- size_t index = modules->size() - 1;
- profile_module_index->emplace_back(
- base_module_address,
- base_module_address + GetModuleTextSize(inf.dli_fbase), index);
- return index;
-}
-
// Stack walking --------------------------------------------------------------
// Fills |state| with |target_thread|'s context.
@@ -151,8 +130,7 @@ size_t GetModuleIndex(const uintptr_t instruction_pointer,
// that no shared resources (e.g. memory allocators) are used for the duration
// of this function.
bool GetThreadState(thread_act_t target_thread, x86_thread_state64_t* state) {
- mach_msg_type_number_t count =
- static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT);
+ auto count = static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT);
return thread_get_state(target_thread, x86_THREAD_STATE64,
reinterpret_cast<thread_state_t>(state),
&count) == KERN_SUCCESS;
@@ -169,17 +147,13 @@ uintptr_t RewritePointerIfInOriginalStack(
const uintptr_t* original_stack_top,
uintptr_t* stack_copy_bottom,
uintptr_t pointer) {
- uintptr_t original_stack_bottom_int =
+ auto original_stack_bottom_int =
reinterpret_cast<uintptr_t>(original_stack_bottom);
- uintptr_t original_stack_top_int =
- reinterpret_cast<uintptr_t>(original_stack_top);
- uintptr_t stack_copy_bottom_int =
- reinterpret_cast<uintptr_t>(stack_copy_bottom);
+ auto original_stack_top_int = reinterpret_cast<uintptr_t>(original_stack_top);
+ auto stack_copy_bottom_int = reinterpret_cast<uintptr_t>(stack_copy_bottom);
- if ((pointer < original_stack_bottom_int) ||
- (pointer >= original_stack_top_int)) {
+ if (pointer < original_stack_bottom_int || pointer >= original_stack_top_int)
return pointer;
- }
return stack_copy_bottom_int + (pointer - original_stack_bottom_int);
}
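The rewrite rule above is purely arithmetic: a value that looks like a pointer into the original stack's [bottom, top) range is translated by the offset between the original bottom and the copy's bottom, and anything outside the range is left alone. A tiny self-contained illustration of the same idea (not the diff's function):

#include <cstdint>
#include <cstring>
#include <iostream>

// Rewrites |value| if it points into [orig_bottom, orig_top), preserving its
// offset from the bottom of the stack but relative to |copy_bottom| instead.
uintptr_t RewriteIfInOriginalRange(uintptr_t orig_bottom,
                                   uintptr_t orig_top,
                                   uintptr_t copy_bottom,
                                   uintptr_t value) {
  if (value < orig_bottom || value >= orig_top)
    return value;  // Not a pointer into the copied range; leave untouched.
  return copy_bottom + (value - orig_bottom);
}

int main() {
  // A fake 4-word "stack" where slot 1 points at slot 3.
  uintptr_t original[4] = {};
  original[1] = reinterpret_cast<uintptr_t>(&original[3]);

  uintptr_t copy[4];
  std::memcpy(copy, original, sizeof(original));
  copy[1] = RewriteIfInOriginalRange(reinterpret_cast<uintptr_t>(&original[0]),
                                     reinterpret_cast<uintptr_t>(&original[4]),
                                     reinterpret_cast<uintptr_t>(&copy[0]),
                                     copy[1]);
  // copy[1] now points at copy[3] rather than back into |original|.
  std::cout << (copy[1] == reinterpret_cast<uintptr_t>(&copy[3])) << "\n";  // 1
}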
@@ -291,65 +265,12 @@ bool HasValidRbp(unw_cursor_t* unwind_cursor, uintptr_t stack_top) {
unw_get_reg(unwind_cursor, UNW_X86_64_RSP, &rsp);
unw_get_reg(unwind_cursor, UNW_X86_64_RBP, &rbp);
uint32_t offset = GetFrameOffset(proc_info.format) * sizeof(unw_word_t);
- if (rbp < offset || (rbp - offset) < rsp || rbp > stack_top) {
+ if (rbp < offset || (rbp - offset) < rsp || rbp > stack_top)
return false;
- }
}
return true;
}
-// Walks the stack represented by |unwind_context|, calling back to the provided
-// lambda for each frame. Returns false if an error occurred, otherwise returns
-// true.
-template <typename StackFrameCallback, typename ContinueUnwindPredicate>
-bool WalkStackFromContext(
- unw_context_t* unwind_context,
- size_t* frame_count,
- std::vector<StackSamplingProfiler::Module>* current_modules,
- std::vector<ModuleIndex>* profile_module_index,
- const StackFrameCallback& callback,
- const ContinueUnwindPredicate& continue_unwind) {
- unw_cursor_t unwind_cursor;
- unw_init_local(&unwind_cursor, unwind_context);
-
- int step_result;
- unw_word_t rip;
- do {
- ++(*frame_count);
- unw_get_reg(&unwind_cursor, UNW_REG_IP, &rip);
-
- // Ensure IP is in a module.
- //
- // Frameless unwinding (non-DWARF) works by fetching the function's
- // stack size from the unwind encoding or stack, and adding it to the
- // stack pointer to determine the function's return address.
- //
- // If we're in a function prologue or epilogue, the actual stack size
- // may be smaller than it will be during the normal course of execution.
- // When libunwind adds the expected stack size, it will look for the
- // return address in the wrong place. This check should ensure that we
- // bail before trying to deref a bad IP obtained this way in the previous
- // frame.
- size_t module_index =
- GetModuleIndex(rip, current_modules, profile_module_index);
- if (module_index == StackSamplingProfiler::Frame::kUnknownModuleIndex) {
- return false;
- }
-
- callback(static_cast<uintptr_t>(rip), module_index);
-
- if (!continue_unwind(&unwind_cursor))
- return false;
-
- step_result = unw_step(&unwind_cursor);
- } while (step_result > 0);
-
- if (step_result != 0)
- return false;
-
- return true;
-}
-
const char* LibSystemKernelName() {
static char path[PATH_MAX];
static char* name = nullptr;
@@ -369,7 +290,7 @@ const char* LibSystemKernelName() {
}
void GetSigtrampRange(uintptr_t* start, uintptr_t* end) {
- uintptr_t address = reinterpret_cast<uintptr_t>(&_sigtramp);
+ auto address = reinterpret_cast<uintptr_t>(&_sigtramp);
DCHECK(address != 0);
*start = address;
@@ -389,57 +310,6 @@ void GetSigtrampRange(uintptr_t* start, uintptr_t* end) {
*end = info.end_ip;
}
-// Walks the stack represented by |thread_state|, calling back to the provided
-// lambda for each frame.
-template <typename StackFrameCallback, typename ContinueUnwindPredicate>
-void WalkStack(const x86_thread_state64_t& thread_state,
- std::vector<StackSamplingProfiler::Module>* current_modules,
- std::vector<ModuleIndex>* profile_module_index,
- const StackFrameCallback& callback,
- const ContinueUnwindPredicate& continue_unwind) {
- size_t frame_count = 0;
- // This uses libunwind to walk the stack. libunwind is designed to be used for
- // a thread to walk its own stack. This creates two problems.
-
- // Problem 1: There is no official way to create a unw_context other than to
- // create it from the current state of the current thread's stack. To get
- // around this, forge a context. A unw_context is just a copy of the 16 main
- // registers followed by the instruction pointer, nothing more.
- // Coincidentally, the first 17 items of the x86_thread_state64_t type are
- // exactly those registers in exactly the same order, so just bulk copy them
- // over.
- unw_context_t unwind_context;
- memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17);
- bool result =
- WalkStackFromContext(&unwind_context, &frame_count, current_modules,
- profile_module_index, callback, continue_unwind);
-
- if (!result)
- return;
-
- if (frame_count == 1) {
- // Problem 2: Because libunwind is designed to be triggered by user code on
- // their own thread, if it hits a library that has no unwind info for the
- // function that is being executed, it just stops. This isn't a problem in
- // the normal case, but in this case, it's quite possible that the stack
- // being walked is stopped in a function that bridges to the kernel and thus
- // is missing the unwind info.
-
- // For now, just unwind the single case where the thread is stopped in a
- // function in libsystem_kernel.
- uint64_t& rsp = unwind_context.data[7];
- uint64_t& rip = unwind_context.data[16];
- Dl_info info;
- if (dladdr(reinterpret_cast<void*>(rip), &info) != 0 &&
- strcmp(info.dli_fname, LibSystemKernelName()) == 0) {
- rip = *reinterpret_cast<uint64_t*>(rsp);
- rsp += 8;
- WalkStackFromContext(&unwind_context, &frame_count, current_modules,
- profile_module_index, callback, continue_unwind);
- }
- }
-}
-
// ScopedSuspendThread --------------------------------------------------------
// Suspends a thread for the lifetime of the object.
@@ -471,40 +341,46 @@ class ScopedSuspendThread {
class NativeStackSamplerMac : public NativeStackSampler {
public:
NativeStackSamplerMac(mach_port_t thread_port,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
~NativeStackSamplerMac() override;
// StackSamplingProfiler::NativeStackSampler:
- void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) override;
- void RecordStackSample(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) override;
- void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+ void ProfileRecordingStarting() override;
+ std::vector<InternalFrame> RecordStackFrames(
+ StackBuffer* stack_buffer,
+ ProfileBuilder* profile_builder) override;
private:
- // Suspends the thread with |thread_port_|, copies its stack and resumes the
- // thread, then records the stack frames and associated modules into |sample|.
- void SuspendThreadAndRecordStack(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample);
+ // Returns the InternalModule containing |instruction_pointer|, adding it to
+ // module_cache_entry_ if it's not already present.
+ InternalModule GetInternalModule(uintptr_t instruction_pointer);
+
+ // Walks the stack represented by |unwind_context|, calling back to the
+ // provided lambda for each frame. Returns false if an error occurred,
+ // otherwise returns true.
+ template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+ bool WalkStackFromContext(unw_context_t* unwind_context,
+ size_t* frame_count,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind);
+
+ // Walks the stack represented by |thread_state|, calling back to the
+ // provided lambda for each frame.
+ template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+ void WalkStack(const x86_thread_state64_t& thread_state,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind);
// Weak reference: Mach port for thread being profiled.
mach_port_t thread_port_;
- const AnnotateCallback annotator_;
-
NativeStackSamplerTestDelegate* const test_delegate_;
// The stack base address corresponding to |thread_handle_|.
const void* const thread_stack_base_address_;
- // Weak. Points to the modules associated with the profile being recorded
- // between ProfileRecordingStarting() and ProfileRecordingStopped().
- std::vector<StackSamplingProfiler::Module>* current_modules_ = nullptr;
-
- // Maps a module's address range to the corresponding Module's index within
- // current_modules_.
- std::vector<ModuleIndex> profile_module_index_;
+ // Maps a module's address range to the module.
+ std::vector<ModuleCacheEntry> module_cache_entry_;
// The address range of |_sigtramp|, the signal trampoline function.
uintptr_t sigtramp_start_;
@@ -515,15 +391,11 @@ class NativeStackSamplerMac : public NativeStackSampler {
NativeStackSamplerMac::NativeStackSamplerMac(
mach_port_t thread_port,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate)
: thread_port_(thread_port),
- annotator_(annotator),
test_delegate_(test_delegate),
thread_stack_base_address_(
pthread_get_stackaddr_np(pthread_from_mach_thread_np(thread_port))) {
- DCHECK(annotator_);
-
GetSigtrampRange(&sigtramp_start_, &sigtramp_end_);
// This class suspends threads, and those threads might be suspended in dyld.
// Therefore, for all the system functions that might be linked in dynamically
@@ -535,29 +407,17 @@ NativeStackSamplerMac::NativeStackSamplerMac(
NativeStackSamplerMac::~NativeStackSamplerMac() {}
-void NativeStackSamplerMac::ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) {
- current_modules_ = modules;
- profile_module_index_.clear();
-}
-
-void NativeStackSamplerMac::RecordStackSample(
- StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
- DCHECK(current_modules_);
-
- SuspendThreadAndRecordStack(stack_buffer, sample);
+void NativeStackSamplerMac::ProfileRecordingStarting() {
+ module_cache_entry_.clear();
}
-void NativeStackSamplerMac::ProfileRecordingStopped(StackBuffer* stack_buffer) {
- current_modules_ = nullptr;
-}
-
-void NativeStackSamplerMac::SuspendThreadAndRecordStack(
+std::vector<InternalFrame> NativeStackSamplerMac::RecordStackFrames(
StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
+ ProfileBuilder* profile_builder) {
x86_thread_state64_t thread_state;
+ const std::vector<InternalFrame> empty_internal_frames;
+
// Copy the stack.
uintptr_t new_stack_top = 0;
@@ -568,21 +428,21 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
// default heap acquired by the target thread before it was suspended.
ScopedSuspendThread suspend_thread(thread_port_);
if (!suspend_thread.was_successful())
- return;
+ return empty_internal_frames;
if (!GetThreadState(thread_port_, &thread_state))
- return;
- uintptr_t stack_top =
- reinterpret_cast<uintptr_t>(thread_stack_base_address_);
+ return empty_internal_frames;
+
+ auto stack_top = reinterpret_cast<uintptr_t>(thread_stack_base_address_);
uintptr_t stack_bottom = thread_state.__rsp;
if (stack_bottom >= stack_top)
- return;
- uintptr_t stack_size = stack_top - stack_bottom;
+ return empty_internal_frames;
+ uintptr_t stack_size = stack_top - stack_bottom;
if (stack_size > stack_buffer->size())
- return;
+ return empty_internal_frames;
- (*annotator_)(sample);
+ profile_builder->RecordAnnotations();
CopyStackAndRewritePointers(
reinterpret_cast<uintptr_t*>(stack_buffer->buffer()),
@@ -600,19 +460,16 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
// Reserve enough memory for most stacks, to avoid repeated allocations.
// Approximately 99.9% of recorded stacks are 128 frames or fewer.
- sample->frames.reserve(128);
-
- auto* current_modules = current_modules_;
- auto* profile_module_index = &profile_module_index_;
+ std::vector<InternalFrame> internal_frames;
+ internal_frames.reserve(128);
// Avoid an out-of-bounds read bug in libunwind that can crash us in some
// circumstances. If we're subject to that case, just record the first frame
// and bail. See MayTriggerUnwInitLocalCrash for details.
uintptr_t rip = thread_state.__rip;
if (MayTriggerUnwInitLocalCrash(rip)) {
- sample->frames.emplace_back(
- rip, GetModuleIndex(rip, current_modules, profile_module_index));
- return;
+ internal_frames.emplace_back(rip, GetInternalModule(rip));
+ return internal_frames;
}
const auto continue_predicate = [this,
@@ -631,22 +488,142 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
return HasValidRbp(unwind_cursor, new_stack_top);
};
- WalkStack(thread_state, current_modules, profile_module_index,
- [sample, current_modules, profile_module_index](
- uintptr_t frame_ip, size_t module_index) {
- sample->frames.emplace_back(frame_ip, module_index);
- },
- continue_predicate);
+ WalkStack(
+ thread_state,
+ [&internal_frames](uintptr_t frame_ip, InternalModule internal_module) {
+ internal_frames.emplace_back(frame_ip, std::move(internal_module));
+ },
+ continue_predicate);
+
+ return internal_frames;
+}
+
+InternalModule NativeStackSamplerMac::GetInternalModule(
+ uintptr_t instruction_pointer) {
+ // Check if |instruction_pointer| is in the address range of a module we've
+ // already seen.
+ auto loc =
+ std::find_if(module_cache_entry_.begin(), module_cache_entry_.end(),
+ [instruction_pointer](const ModuleCacheEntry& entry) {
+ return instruction_pointer >= entry.base_address &&
+ instruction_pointer < entry.end_address;
+ });
+ if (loc != module_cache_entry_.end())
+ return loc->internal_module;
+
+ Dl_info inf;
+ if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf))
+ return InternalModule();
+
+ auto base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
+
+ InternalModule internal_module(
+ base_module_address, GetUniqueId(inf.dli_fbase), FilePath(inf.dli_fname));
+
+ module_cache_entry_.emplace_back(
+ base_module_address,
+ base_module_address + GetModuleTextSize(inf.dli_fbase), internal_module);
+
+ return internal_module;
+}
+
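On a cache miss, GetInternalModule() above resolves the module with dladdr() and records its text range so later frames whose IPs land in the same image hit the cache. A stand-alone sketch of the dladdr() lookup itself (POSIX; on Linux it additionally needs _GNU_SOURCE and -ldl, and error handling is simplified):

#include <dlfcn.h>
#include <cstdio>

// Prints the path and base address of the image containing |address|, e.g. the
// program's own executable when passed one of its function pointers.
void PrintModuleForAddress(const void* address) {
  Dl_info info;
  if (!dladdr(address, &info)) {
    std::puts("no module found for address");
    return;
  }
  std::printf("module: %s base: %p\n", info.dli_fname, info.dli_fbase);
}

int main() {
  PrintModuleForAddress(reinterpret_cast<const void*>(&PrintModuleForAddress));
}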
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+bool NativeStackSamplerMac::WalkStackFromContext(
+ unw_context_t* unwind_context,
+ size_t* frame_count,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind) {
+ unw_cursor_t unwind_cursor;
+ unw_init_local(&unwind_cursor, unwind_context);
+
+ int step_result;
+ unw_word_t rip;
+ do {
+ ++(*frame_count);
+ unw_get_reg(&unwind_cursor, UNW_REG_IP, &rip);
+
+ // Ensure IP is in a module.
+ //
+ // Frameless unwinding (non-DWARF) works by fetching the function's stack
+ // size from the unwind encoding or stack, and adding it to the stack
+ // pointer to determine the function's return address.
+ //
+ // If we're in a function prologue or epilogue, the actual stack size may be
+ // smaller than it will be during the normal course of execution. When
+ // libunwind adds the expected stack size, it will look for the return
+ // address in the wrong place. This check should ensure that we bail before
+ // trying to deref a bad IP obtained this way in the previous frame.
+ InternalModule internal_module = GetInternalModule(rip);
+ if (!internal_module.is_valid)
+ return false;
+
+ callback(static_cast<uintptr_t>(rip), internal_module);
+
+ if (!continue_unwind(&unwind_cursor))
+ return false;
+
+ step_result = unw_step(&unwind_cursor);
+ } while (step_result > 0);
+
+ if (step_result != 0)
+ return false;
+
+ return true;
+}
+
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+void NativeStackSamplerMac::WalkStack(
+ const x86_thread_state64_t& thread_state,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind) {
+ size_t frame_count = 0;
+ // This uses libunwind to walk the stack. libunwind is designed to be used for
+ // a thread to walk its own stack. This creates two problems.
+
+ // Problem 1: There is no official way to create a unw_context other than to
+ // create it from the current state of the current thread's stack. To get
+ // around this, forge a context. A unw_context is just a copy of the 16 main
+ // registers followed by the instruction pointer, nothing more.
+ // Coincidentally, the first 17 items of the x86_thread_state64_t type are
+ // exactly those registers in exactly the same order, so just bulk copy them
+ // over.
+ unw_context_t unwind_context;
+ memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17);
+ bool result = WalkStackFromContext(&unwind_context, &frame_count, callback,
+ continue_unwind);
+
+ if (!result)
+ return;
+
+ if (frame_count == 1) {
+ // Problem 2: Because libunwind is designed to be triggered by user code on
+ // their own thread, if it hits a library that has no unwind info for the
+ // function that is being executed, it just stops. This isn't a problem in
+ // the normal case, but in this case, it's quite possible that the stack
+ // being walked is stopped in a function that bridges to the kernel and thus
+ // is missing the unwind info.
+
+ // For now, just unwind the single case where the thread is stopped in a
+ // function in libsystem_kernel.
+ uint64_t& rsp = unwind_context.data[7];
+ uint64_t& rip = unwind_context.data[16];
+ Dl_info info;
+ if (dladdr(reinterpret_cast<void*>(rip), &info) != 0 &&
+ strcmp(info.dli_fname, LibSystemKernelName()) == 0) {
+ rip = *reinterpret_cast<uint64_t*>(rsp);
+ rsp += 8;
+ WalkStackFromContext(&unwind_context, &frame_count, callback,
+ continue_unwind);
+ }
+ }
}
} // namespace
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
- return std::make_unique<NativeStackSamplerMac>(thread_id, annotator,
- test_delegate);
+ return std::make_unique<NativeStackSamplerMac>(thread_id, test_delegate);
}
size_t NativeStackSampler::GetStackBufferSize() {
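The in-class WalkStack() above has to forge a unw_context_t from another thread's x86_thread_state64_t because libunwind only provides unw_getcontext() for the calling thread. For contrast, here is a minimal stand-alone sketch of the ordinary same-thread walk on macOS libunwind; the cross-thread memcpy of 17 registers in the diff is exactly what replaces the unw_getcontext() call below.

#include <libunwind.h>
#include <cstdint>
#include <vector>

// Collects the instruction pointer of every frame on the calling thread's own
// stack. unw_getcontext() captures the current register state; the sampler in
// this diff instead builds an equivalent context from a suspended thread's
// x86_thread_state64_t.
std::vector<uintptr_t> WalkOwnStack() {
  unw_context_t context;
  unw_getcontext(&context);

  unw_cursor_t cursor;
  unw_init_local(&cursor, &context);

  std::vector<uintptr_t> instruction_pointers;
  do {
    unw_word_t ip = 0;
    unw_get_reg(&cursor, UNW_REG_IP, &ip);
    instruction_pointers.push_back(static_cast<uintptr_t>(ip));
  } while (unw_step(&cursor) > 0);
  return instruction_pointers;
}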
diff --git a/chromium/base/profiler/native_stack_sampler_posix.cc b/chromium/base/profiler/native_stack_sampler_posix.cc
index 1055d44e930..fdc18e017ff 100644
--- a/chromium/base/profiler/native_stack_sampler_posix.cc
+++ b/chromium/base/profiler/native_stack_sampler_posix.cc
@@ -8,7 +8,6 @@ namespace base {
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
return std::unique_ptr<NativeStackSampler>();
}
diff --git a/chromium/base/profiler/native_stack_sampler_win.cc b/chromium/base/profiler/native_stack_sampler_win.cc
index b53197d0dc7..b9b1773e896 100644
--- a/chromium/base/profiler/native_stack_sampler_win.cc
+++ b/chromium/base/profiler/native_stack_sampler_win.cc
@@ -4,8 +4,9 @@
#include "base/profiler/native_stack_sampler.h"
-#include <objbase.h>
#include <windows.h>
+
+#include <objbase.h>
#include <stddef.h>
#include <winternl.h>
@@ -20,6 +21,8 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/profiler/win32_stack_frame_unwinder.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -29,6 +32,12 @@
namespace base {
+using Frame = StackSamplingProfiler::Frame;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
+using ProfileBuilder = StackSamplingProfiler::ProfileBuilder;
+
// Stack recording functions --------------------------------------------------
namespace {
@@ -59,21 +68,18 @@ const TEB* GetThreadEnvironmentBlock(HANDLE thread_handle) {
};
using NtQueryInformationThreadFunction =
- NTSTATUS (WINAPI*)(HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG,
- PULONG);
+ NTSTATUS(WINAPI*)(HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG, PULONG);
- const NtQueryInformationThreadFunction nt_query_information_thread =
- reinterpret_cast<NtQueryInformationThreadFunction>(
- ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"),
- "NtQueryInformationThread"));
+ const auto nt_query_information_thread =
+ reinterpret_cast<NtQueryInformationThreadFunction>(::GetProcAddress(
+ ::GetModuleHandle(L"ntdll.dll"), "NtQueryInformationThread"));
if (!nt_query_information_thread)
return nullptr;
THREAD_BASIC_INFORMATION basic_info = {0};
- NTSTATUS status =
- nt_query_information_thread(thread_handle, ThreadBasicInformation,
- &basic_info, sizeof(THREAD_BASIC_INFORMATION),
- nullptr);
+ NTSTATUS status = nt_query_information_thread(
+ thread_handle, ThreadBasicInformation, &basic_info,
+ sizeof(THREAD_BASIC_INFORMATION), nullptr);
if (status != 0)
return nullptr;
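NtQueryInformationThread is not exported through an import library, so the code above resolves it at runtime with GetProcAddress and keeps a typed function pointer. The same pattern is shown below with a documented kernel32 export so the sketch stays self-contained; the ntdll-specific types used in the diff are omitted, and this is an illustration of the idiom rather than the diff's function.

#include <windows.h>

using GetTickCount64Fn = ULONGLONG(WINAPI*)();

// Looks the symbol up once, caches the typed function pointer, and falls back
// gracefully if the export is unavailable.
ULONGLONG SafeGetTickCount64() {
  static const auto fn = reinterpret_cast<GetTickCount64Fn>(
      ::GetProcAddress(::GetModuleHandleW(L"kernel32.dll"), "GetTickCount64"));
  return fn ? fn() : 0;
}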
@@ -83,9 +89,11 @@ const TEB* GetThreadEnvironmentBlock(HANDLE thread_handle) {
#if defined(_WIN64)
// If the value at |pointer| points to the original stack, rewrite it to point
// to the corresponding location in the copied stack.
-void RewritePointerIfInOriginalStack(uintptr_t top, uintptr_t bottom,
- void* stack_copy, const void** pointer) {
- const uintptr_t value = reinterpret_cast<uintptr_t>(*pointer);
+void RewritePointerIfInOriginalStack(uintptr_t top,
+ uintptr_t bottom,
+ void* stack_copy,
+ const void** pointer) {
+ const auto value = reinterpret_cast<uintptr_t>(*pointer);
if (value >= bottom && value < top) {
*pointer = reinterpret_cast<const void*>(
static_cast<unsigned char*>(stack_copy) + (value - bottom));
@@ -122,23 +130,17 @@ void CopyMemoryFromStack(void* to, const void* from, size_t length)
// Note: this function must not access memory in the original stack as it may
// have been changed or deallocated by this point. This is why |top| and
// |bottom| are passed as uintptr_t.
-void RewritePointersToStackMemory(uintptr_t top, uintptr_t bottom,
- CONTEXT* context, void* stack_copy) {
+void RewritePointersToStackMemory(uintptr_t top,
+ uintptr_t bottom,
+ CONTEXT* context,
+ void* stack_copy) {
#if defined(_WIN64)
- DWORD64 CONTEXT::* const nonvolatile_registers[] = {
- &CONTEXT::R12,
- &CONTEXT::R13,
- &CONTEXT::R14,
- &CONTEXT::R15,
- &CONTEXT::Rdi,
- &CONTEXT::Rsi,
- &CONTEXT::Rbx,
- &CONTEXT::Rbp,
- &CONTEXT::Rsp
- };
+ DWORD64 CONTEXT::*const nonvolatile_registers[] = {
+ &CONTEXT::R12, &CONTEXT::R13, &CONTEXT::R14, &CONTEXT::R15, &CONTEXT::Rdi,
+ &CONTEXT::Rsi, &CONTEXT::Rbx, &CONTEXT::Rbp, &CONTEXT::Rsp};
// Rewrite pointers in the context.
- for (size_t i = 0; i < arraysize(nonvolatile_registers); ++i) {
+ for (size_t i = 0; i < size(nonvolatile_registers); ++i) {
DWORD64* const reg = &(context->*nonvolatile_registers[i]);
RewritePointerIfInOriginalStack(top, bottom, stack_copy,
reinterpret_cast<const void**>(reg));
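The reformatted table above is an array of pointers to data members (DWORD64 CONTEXT::*), which lets a single loop visit each nonvolatile register slot of a CONTEXT by name. A small self-contained illustration of the idiom, using a hypothetical Registers struct instead of CONTEXT:

#include <cstdint>
#include <cstdio>

struct Registers {  // Hypothetical stand-in for the relevant CONTEXT fields.
  uint64_t r12, r13, rsp;
};

int main() {
  // Array of pointers-to-member: each entry names one field of Registers.
  uint64_t Registers::*const fields[] = {&Registers::r12, &Registers::r13,
                                         &Registers::rsp};

  Registers regs = {1, 2, 3};
  for (auto field : fields) {
    uint64_t* slot = &(regs.*field);  // Same access pattern as context->*member.
    *slot += 100;                     // Rewrite each register slot in place.
  }
  std::printf("%llu %llu %llu\n", (unsigned long long)regs.r12,
              (unsigned long long)regs.r13, (unsigned long long)regs.rsp);
  // Prints: 101 102 103
}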
@@ -159,8 +161,7 @@ struct RecordedFrame {
RecordedFrame(RecordedFrame&& other)
: instruction_pointer(other.instruction_pointer),
- module(std::move(other.module)) {
- }
+ module(std::move(other.module)) {}
RecordedFrame& operator=(RecordedFrame&& other) {
instruction_pointer = other.instruction_pointer;
@@ -220,14 +221,14 @@ std::string GetBuildIDForModule(HMODULE module_handle) {
DWORD age;
win::PEImage(module_handle).GetDebugId(&guid, &age, /* pdb_file= */ nullptr);
const int kGUIDSize = 39;
- std::wstring build_id;
+ string16 build_id;
int result =
::StringFromGUID2(guid, WriteInto(&build_id, kGUIDSize), kGUIDSize);
if (result != kGUIDSize)
return std::string();
RemoveChars(build_id, L"{}-", &build_id);
build_id += StringPrintf(L"%d", age);
- return WideToUTF8(build_id);
+ return UTF16ToUTF8(build_id);
}
// ScopedDisablePriorityBoost -------------------------------------------------
@@ -315,8 +316,7 @@ ScopedSuspendThread::~ScopedSuspendThread() {
bool PointsToGuardPage(uintptr_t stack_pointer) {
MEMORY_BASIC_INFORMATION memory_info;
SIZE_T result = ::VirtualQuery(reinterpret_cast<LPCVOID>(stack_pointer),
- &memory_info,
- sizeof(memory_info));
+ &memory_info, sizeof(memory_info));
return result != 0 && (memory_info.Protect & PAGE_GUARD);
}
@@ -333,8 +333,7 @@ void SuspendThreadAndRecordStack(
void* stack_copy_buffer,
size_t stack_copy_buffer_size,
std::vector<RecordedFrame>* stack,
- NativeStackSampler::AnnotateCallback annotator,
- StackSamplingProfiler::Sample* sample,
+ ProfileBuilder* profile_builder,
NativeStackSamplerTestDelegate* test_delegate) {
DCHECK(stack->empty());
@@ -343,7 +342,7 @@ void SuspendThreadAndRecordStack(
// The stack bounds are saved to uintptr_ts for use outside
// ScopedSuspendThread, as the thread's memory is not safe to dereference
// beyond that point.
- const uintptr_t top = reinterpret_cast<uintptr_t>(base_address);
+ const auto top = reinterpret_cast<uintptr_t>(base_address);
uintptr_t bottom = 0u;
{
@@ -369,7 +368,7 @@ void SuspendThreadAndRecordStack(
if (PointsToGuardPage(bottom))
return;
- (*annotator)(sample);
+ profile_builder->RecordAnnotations();
CopyMemoryFromStack(stack_copy_buffer,
reinterpret_cast<const void*>(bottom), top - bottom);
@@ -388,163 +387,126 @@ void SuspendThreadAndRecordStack(
class NativeStackSamplerWin : public NativeStackSampler {
public:
NativeStackSamplerWin(win::ScopedHandle thread_handle,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
~NativeStackSamplerWin() override;
// StackSamplingProfiler::NativeStackSampler:
- void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) override;
- void RecordStackSample(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) override;
- void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+ void ProfileRecordingStarting() override;
+ std::vector<InternalFrame> RecordStackFrames(
+ StackBuffer* stack_buffer,
+ ProfileBuilder* profile_builder) override;
private:
// Attempts to query the module filename, base address, and id for
- // |module_handle|, and store them in |module|. Returns true if it succeeded.
- static bool GetModuleForHandle(HMODULE module_handle,
- StackSamplingProfiler::Module* module);
-
- // Gets the index for the Module corresponding to |module_handle| in
- // |modules|, adding it if it's not already present. Returns
- // StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
- // determined for |module|.
- size_t GetModuleIndex(HMODULE module_handle,
- std::vector<StackSamplingProfiler::Module>* modules);
-
- // Copies the information represented by |stack| into |sample| and |modules|.
- void CopyToSample(const std::vector<RecordedFrame>& stack,
- StackSamplingProfiler::Sample* sample,
- std::vector<StackSamplingProfiler::Module>* modules);
+ // |module_handle|, and returns them in an InternalModule object.
+ static InternalModule GetModuleForHandle(HMODULE module_handle);
- win::ScopedHandle thread_handle_;
+ // Creates a set of internal frames with the information represented by
+ // |stack|.
+ std::vector<InternalFrame> CreateInternalFrames(
+ const std::vector<RecordedFrame>& stack);
- const AnnotateCallback annotator_;
+ win::ScopedHandle thread_handle_;
NativeStackSamplerTestDelegate* const test_delegate_;
// The stack base address corresponding to |thread_handle_|.
const void* const thread_stack_base_address_;
- // Weak. Points to the modules associated with the profile being recorded
- // between ProfileRecordingStarting() and ProfileRecordingStopped().
- std::vector<StackSamplingProfiler::Module>* current_modules_;
-
- // Maps a module handle to the corresponding Module's index within
- // current_modules_.
- std::map<HMODULE, size_t> profile_module_index_;
+ // The internal module objects, indexed by the module handle.
+ std::map<HMODULE, InternalModule> module_cache_;
DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerWin);
};
NativeStackSamplerWin::NativeStackSamplerWin(
win::ScopedHandle thread_handle,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate)
: thread_handle_(thread_handle.Take()),
- annotator_(annotator),
test_delegate_(test_delegate),
thread_stack_base_address_(
- GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase) {
- DCHECK(annotator_);
-}
+ GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase) {}
-NativeStackSamplerWin::~NativeStackSamplerWin() {
-}
+NativeStackSamplerWin::~NativeStackSamplerWin() {}
-void NativeStackSamplerWin::ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) {
- current_modules_ = modules;
- profile_module_index_.clear();
+void NativeStackSamplerWin::ProfileRecordingStarting() {
+ module_cache_.clear();
}
-void NativeStackSamplerWin::RecordStackSample(
+std::vector<InternalFrame> NativeStackSamplerWin::RecordStackFrames(
StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
+ ProfileBuilder* profile_builder) {
DCHECK(stack_buffer);
- DCHECK(current_modules_);
std::vector<RecordedFrame> stack;
SuspendThreadAndRecordStack(thread_handle_.Get(), thread_stack_base_address_,
stack_buffer->buffer(), stack_buffer->size(),
- &stack, annotator_, sample, test_delegate_);
- CopyToSample(stack, sample, current_modules_);
-}
+ &stack, profile_builder, test_delegate_);
-void NativeStackSamplerWin::ProfileRecordingStopped(StackBuffer* stack_buffer) {
- current_modules_ = nullptr;
+ return CreateInternalFrames(stack);
}
// static
-bool NativeStackSamplerWin::GetModuleForHandle(
- HMODULE module_handle,
- StackSamplingProfiler::Module* module) {
+InternalModule NativeStackSamplerWin::GetModuleForHandle(
+ HMODULE module_handle) {
wchar_t module_name[MAX_PATH];
DWORD result_length =
- GetModuleFileName(module_handle, module_name, arraysize(module_name));
+ ::GetModuleFileName(module_handle, module_name, size(module_name));
if (result_length == 0)
- return false;
+ return InternalModule();
- module->filename = base::FilePath(module_name);
+ const std::string& module_id = GetBuildIDForModule(module_handle);
+ if (module_id.empty())
+ return InternalModule();
- module->base_address = reinterpret_cast<uintptr_t>(module_handle);
+ return InternalModule(reinterpret_cast<uintptr_t>(module_handle), module_id,
+ FilePath(module_name));
+}
- module->id = GetBuildIDForModule(module_handle);
- if (module->id.empty())
- return false;
+std::vector<InternalFrame> NativeStackSamplerWin::CreateInternalFrames(
+ const std::vector<RecordedFrame>& stack) {
+ std::vector<InternalFrame> internal_frames;
+ internal_frames.reserve(stack.size());
- return true;
-}
+ for (const auto& frame : stack) {
+ auto frame_ip = reinterpret_cast<uintptr_t>(frame.instruction_pointer);
-size_t NativeStackSamplerWin::GetModuleIndex(
- HMODULE module_handle,
- std::vector<StackSamplingProfiler::Module>* modules) {
- if (!module_handle)
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
-
- auto loc = profile_module_index_.find(module_handle);
- if (loc == profile_module_index_.end()) {
- StackSamplingProfiler::Module module;
- if (!GetModuleForHandle(module_handle, &module))
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
- modules->push_back(module);
- loc = profile_module_index_.insert(std::make_pair(
- module_handle, modules->size() - 1)).first;
- }
+ HMODULE module_handle = frame.module.Get();
+ if (!module_handle) {
+ internal_frames.emplace_back(frame_ip, InternalModule());
+ continue;
+ }
- return loc->second;
-}
+ auto loc = module_cache_.find(module_handle);
+ if (loc != module_cache_.end()) {
+ internal_frames.emplace_back(frame_ip, loc->second);
+ continue;
+ }
-void NativeStackSamplerWin::CopyToSample(
- const std::vector<RecordedFrame>& stack,
- StackSamplingProfiler::Sample* sample,
- std::vector<StackSamplingProfiler::Module>* modules) {
- sample->frames.clear();
- sample->frames.reserve(stack.size());
-
- for (const RecordedFrame& frame : stack) {
- sample->frames.push_back(StackSamplingProfiler::Frame(
- reinterpret_cast<uintptr_t>(frame.instruction_pointer),
- GetModuleIndex(frame.module.Get(), modules)));
+ InternalModule internal_module = GetModuleForHandle(module_handle);
+ if (internal_module.is_valid)
+ module_cache_.insert(std::make_pair(module_handle, internal_module));
+
+ internal_frames.emplace_back(frame_ip, std::move(internal_module));
}
+
+ return internal_frames;
}
} // namespace
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
#if _WIN64
// Get the thread's handle.
HANDLE thread_handle = ::OpenThread(
THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION,
- FALSE,
- thread_id);
+ FALSE, thread_id);
if (thread_handle) {
return std::unique_ptr<NativeStackSampler>(new NativeStackSamplerWin(
- win::ScopedHandle(thread_handle), annotator, test_delegate));
+ win::ScopedHandle(thread_handle), test_delegate));
}
#endif
return std::unique_ptr<NativeStackSampler>();
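CreateInternalFrames() above memoizes per-module information in a std::map keyed by HMODULE, so each image is queried only once per profile and invalid lookups are retried rather than cached. A simplified, self-contained sketch of that memoization pattern, with hypothetical key/value types standing in for HMODULE and InternalModule:

#include <map>
#include <string>

struct ModuleInfo {  // Hypothetical stand-in for InternalModule.
  std::string id;
  bool is_valid = false;
};

class ModuleCache {
 public:
  // Returns the cached info for |handle|, computing and caching it on a miss.
  // Invalid results are intentionally not cached, mirroring the diff: a module
  // whose id could not be determined will be retried on the next sample.
  ModuleInfo Get(const void* handle) {
    auto it = cache_.find(handle);
    if (it != cache_.end())
      return it->second;

    ModuleInfo info = Compute(handle);
    if (info.is_valid)
      cache_.emplace(handle, info);
    return info;
  }

 private:
  static ModuleInfo Compute(const void* handle) {
    // Placeholder for GetModuleForHandle(): query the name/id for |handle|.
    ModuleInfo info;
    if (handle) {
      info.id = "example-id";
      info.is_valid = true;
    }
    return info;
  }

  std::map<const void*, ModuleInfo> cache_;
};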
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index a8cddf08650..02df814f8cc 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -5,7 +5,6 @@
#include "base/profiler/stack_sampling_profiler.h"
#include <algorithm>
-#include <map>
#include <utility>
#include "base/atomic_sequence_num.h"
@@ -13,7 +12,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
@@ -27,6 +25,8 @@
namespace base {
+const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
+
namespace {
// This value is used to initialize the WaitableEvent object. This MUST BE set
@@ -37,29 +37,14 @@ constexpr WaitableEvent::ResetPolicy kResetPolicy =
// This value is used when there is no collection in progress and thus no ID
// for referencing the active collection to the SamplingThread.
-const int NULL_PROFILER_ID = -1;
-
-void ChangeAtomicFlags(subtle::Atomic32* flags,
- subtle::Atomic32 set,
- subtle::Atomic32 clear) {
- DCHECK(set != 0 || clear != 0);
- DCHECK_EQ(0, set & clear);
-
- subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
- while (true) {
- subtle::Atomic32 existing =
- subtle::NoBarrier_CompareAndSwap(flags, bits, (bits | set) & ~clear);
- if (existing == bits)
- break;
- bits = existing;
- }
-}
+const int kNullProfilerId = -1;
} // namespace
// StackSamplingProfiler::Module ----------------------------------------------
StackSamplingProfiler::Module::Module() : base_address(0u) {}
+
StackSamplingProfiler::Module::Module(uintptr_t base_address,
const std::string& id,
const FilePath& filename)
@@ -67,6 +52,17 @@ StackSamplingProfiler::Module::Module(uintptr_t base_address,
StackSamplingProfiler::Module::~Module() = default;
+// StackSamplingProfiler::InternalModule --------------------------------------
+
+StackSamplingProfiler::InternalModule::InternalModule() : is_valid(false) {}
+
+StackSamplingProfiler::InternalModule::InternalModule(uintptr_t base_address,
+ const std::string& id,
+ const FilePath& filename)
+ : base_address(base_address), id(id), filename(filename), is_valid(true) {}
+
+StackSamplingProfiler::InternalModule::~InternalModule() = default;
+
// StackSamplingProfiler::Frame -----------------------------------------------
StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
@@ -76,8 +72,17 @@ StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
StackSamplingProfiler::Frame::~Frame() = default;
StackSamplingProfiler::Frame::Frame()
- : instruction_pointer(0), module_index(kUnknownModuleIndex) {
-}
+ : instruction_pointer(0), module_index(kUnknownModuleIndex) {}
+
+// StackSamplingProfiler::InternalFrame -------------------------------------
+
+StackSamplingProfiler::InternalFrame::InternalFrame(
+ uintptr_t instruction_pointer,
+ InternalModule internal_module)
+ : instruction_pointer(instruction_pointer),
+ internal_module(std::move(internal_module)) {}
+
+StackSamplingProfiler::InternalFrame::~InternalFrame() = default;
// StackSamplingProfiler::Sample ----------------------------------------------
@@ -145,61 +150,58 @@ class StackSamplingProfiler::SamplingThread : public Thread {
};
struct CollectionContext {
- CollectionContext(int profiler_id,
- PlatformThreadId target,
+ CollectionContext(PlatformThreadId target,
const SamplingParams& params,
- const CompletedCallback& callback,
WaitableEvent* finished,
- std::unique_ptr<NativeStackSampler> sampler)
- : profiler_id(profiler_id),
+ std::unique_ptr<NativeStackSampler> sampler,
+ std::unique_ptr<ProfileBuilder> profile_builder)
+ : collection_id(next_collection_id.GetNext()),
target(target),
params(params),
- callback(callback),
finished(finished),
- native_sampler(std::move(sampler)) {}
+ native_sampler(std::move(sampler)),
+ profile_builder(std::move(profile_builder)) {}
~CollectionContext() = default;
- // An identifier for the profiler associated with this collection, used to
- // uniquely identify the collection to outside interests.
- const int profiler_id;
+ // An identifier for this collection, used to uniquely identify the
+ // collection to outside interests.
+ const int collection_id;
- const PlatformThreadId target; // ID of The thread being sampled.
- const SamplingParams params; // Information about how to sample.
- const CompletedCallback callback; // Callback made when sampling complete.
- WaitableEvent* const finished; // Signaled when all sampling complete.
+ const PlatformThreadId target; // ID of the thread being sampled.
+ const SamplingParams params; // Information about how to sample.
+ WaitableEvent* const finished; // Signaled when all sampling complete.
// Platform-specific module that does the actual sampling.
std::unique_ptr<NativeStackSampler> native_sampler;
+ // Receives the sampling data and builds a CallStackProfile.
+ std::unique_ptr<ProfileBuilder> profile_builder;
+
// The absolute time for the next sample.
Time next_sample_time;
// The time that a profile was started, for calculating the total duration.
Time profile_start_time;
- // Counters that indicate the current position along the acquisition.
- int burst = 0;
- int sample = 0;
-
- // The collected stack samples. The active profile is always at the back().
- CallStackProfiles profiles;
+ // Counter that indicates the current sample position along the acquisition.
+ int sample_count = 0;
- // Sequence number for generating new profiler ids.
- static AtomicSequenceNumber next_profiler_id;
+ // Sequence number for generating new collection ids.
+ static AtomicSequenceNumber next_collection_id;
};
// Gets the single instance of this class.
static SamplingThread* GetInstance();
// Adds a new CollectionContext to the thread. This can be called externally
- // from any thread. This returns an ID that can later be used to stop
- // the sampling.
+ // from any thread. This returns a collection id that can later be used to
+ // stop the sampling.
int Add(std::unique_ptr<CollectionContext> collection);
- // Removes an active collection based on its ID, forcing it to run its
- // callback if any data has been collected. This can be called externally
+ // Removes an active collection based on its collection id, forcing it to run
+ // its callback if any data has been collected. This can be called externally
// from any thread.
- void Remove(int id);
+ void Remove(int collection_id);
private:
friend class TestAPI;
@@ -235,28 +237,21 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// Get task runner that is usable from the sampling thread itself.
scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
- // Finishes a collection and reports collected data via callback. The
- // collection's |finished| waitable event will be signalled. The |collection|
- // should already have been removed from |active_collections_| by the caller,
- // as this is needed to avoid flakyness in unit tests.
+ // Finishes a collection. The collection's |finished| waitable event will be
+ // signalled. The |collection| should already have been removed from
+ // |active_collections_| by the caller, as this is needed to avoid flakiness
+ // in unit tests.
void FinishCollection(CollectionContext* collection);
- // Records a single sample of a collection.
- void RecordSample(CollectionContext* collection);
-
// Check if the sampling thread is idle and begin a shutdown if it is.
void ScheduleShutdownIfIdle();
// These methods are tasks that get posted to the internal message queue.
void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
- void RemoveCollectionTask(int id);
- void PerformCollectionTask(int id);
+ void RemoveCollectionTask(int collection_id);
+ void RecordSampleTask(int collection_id);
void ShutdownTask(int add_events);
- // Updates the |next_sample_time| time based on configured parameters.
- // Returns true if there is a next sample or false if sampling is complete.
- bool UpdateNextSampleTime(CollectionContext* collection);
-
// Thread:
void CleanUp() override;
@@ -265,10 +260,10 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// that take it are not called concurrently.
std::unique_ptr<NativeStackSampler::StackBuffer> stack_buffer_;
- // A map of IDs to collection contexts. Because this class is a singleton
- // that is never destroyed, context objects will never be destructed except
- // by explicit action. Thus, it's acceptable to pass unretained pointers
- // to these objects when posting tasks.
+ // A map of collection ids to collection contexts. Because this class is a
+ // singleton that is never destroyed, context objects will never be destructed
+ // except by explicit action. Thus, it's acceptable to pass unretained
+ // pointers to these objects when posting tasks.
std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
// State maintained about the current execution (or non-execution) of
@@ -285,7 +280,7 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// A counter that notes adds of new collection requests. It is incremented
// when changes occur so that delayed shutdown tasks are able to detect if
- // samething new has happened while it was waiting. Like all "execution_state"
+ // something new has happened while it was waiting. Like all "execution_state"
// vars, this must be accessed while holding |thread_execution_state_lock_|.
int thread_execution_state_add_events_ = 0;
@@ -371,8 +366,8 @@ void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
event->Signal();
}
-AtomicSequenceNumber
- StackSamplingProfiler::SamplingThread::CollectionContext::next_profiler_id;
+AtomicSequenceNumber StackSamplingProfiler::SamplingThread::CollectionContext::
+ next_collection_id;
StackSamplingProfiler::SamplingThread::SamplingThread()
: Thread("StackSamplingProfiler") {}
@@ -388,7 +383,7 @@ int StackSamplingProfiler::SamplingThread::Add(
std::unique_ptr<CollectionContext> collection) {
// This is not to be run on the sampling thread.
- int id = collection->profiler_id;
+ int collection_id = collection->collection_id;
scoped_refptr<SingleThreadTaskRunner> task_runner =
GetOrCreateTaskRunnerForAdd();
@@ -396,10 +391,10 @@ int StackSamplingProfiler::SamplingThread::Add(
FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
std::move(collection)));
- return id;
+ return collection_id;
}
-void StackSamplingProfiler::SamplingThread::Remove(int id) {
+void StackSamplingProfiler::SamplingThread::Remove(int collection_id) {
// This is not to be run on the sampling thread.
ThreadExecutionState state;
@@ -411,9 +406,9 @@ void StackSamplingProfiler::SamplingThread::Remove(int id) {
// This can fail if the thread were to exit between acquisition of the task
// runner above and the call below. In that case, however, everything has
// stopped so there's no need to try to stop it.
- task_runner->PostTask(
- FROM_HERE,
- BindOnce(&SamplingThread::RemoveCollectionTask, Unretained(this), id));
+ task_runner->PostTask(FROM_HERE,
+ BindOnce(&SamplingThread::RemoveCollectionTask,
+ Unretained(this), collection_id));
}
scoped_refptr<SingleThreadTaskRunner>
@@ -496,61 +491,18 @@ StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
void StackSamplingProfiler::SamplingThread::FinishCollection(
CollectionContext* collection) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- DCHECK_EQ(0u, active_collections_.count(collection->profiler_id));
-
- // If there is no duration for the final profile (because it was stopped),
- // calculate it now.
- if (!collection->profiles.empty() &&
- collection->profiles.back().profile_duration == TimeDelta()) {
- collection->profiles.back().profile_duration =
- Time::Now() - collection->profile_start_time +
- collection->params.sampling_interval;
- }
+ DCHECK_EQ(0u, active_collections_.count(collection->collection_id));
- // Extract some information so callback and event-signalling can still be
- // done after the collection has been removed from the list of "active" ones.
- // This allows the the controlling object (and tests using it) to be confident
- // that collection is fully finished when those things occur.
- const CompletedCallback callback = collection->callback;
- CallStackProfiles profiles = std::move(collection->profiles);
- WaitableEvent* finished = collection->finished;
+ TimeDelta profile_duration = Time::Now() - collection->profile_start_time +
+ collection->params.sampling_interval;
- // Run the associated callback, passing the collected profiles.
- callback.Run(std::move(profiles));
+ collection->profile_builder->OnProfileCompleted(
+ profile_duration, collection->params.sampling_interval);
// Signal that this collection is finished.
- finished->Signal();
-}
+ collection->finished->Signal();
-void StackSamplingProfiler::SamplingThread::RecordSample(
- CollectionContext* collection) {
- DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- DCHECK(collection->native_sampler);
-
- // If this is the first sample of a burst, a new Profile needs to be created
- // and filled.
- if (collection->sample == 0) {
- collection->profiles.push_back(CallStackProfile());
- CallStackProfile& profile = collection->profiles.back();
- profile.sampling_period = collection->params.sampling_interval;
- collection->profile_start_time = Time::Now();
- collection->native_sampler->ProfileRecordingStarting(&profile.modules);
- }
-
- // The currently active profile being captured.
- CallStackProfile& profile = collection->profiles.back();
-
- // Record a single sample.
- profile.samples.push_back(Sample());
- collection->native_sampler->RecordStackSample(stack_buffer_.get(),
- &profile.samples.back());
-
- // If this is the last sample of a burst, record the total time.
- if (collection->sample == collection->params.samples_per_burst - 1) {
- profile.profile_duration = Time::Now() - collection->profile_start_time +
- collection->params.sampling_interval;
- collection->native_sampler->ProfileRecordingStopped(stack_buffer_.get());
- }
+ ScheduleShutdownIfIdle();
}
void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
@@ -577,16 +529,16 @@ void StackSamplingProfiler::SamplingThread::AddCollectionTask(
std::unique_ptr<CollectionContext> collection) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- const int profiler_id = collection->profiler_id;
+ const int collection_id = collection->collection_id;
const TimeDelta initial_delay = collection->params.initial_delay;
active_collections_.insert(
- std::make_pair(profiler_id, std::move(collection)));
+ std::make_pair(collection_id, std::move(collection)));
GetTaskRunnerOnSamplingThread()->PostDelayedTask(
FROM_HERE,
- BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this),
- profiler_id),
+ BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
+ collection_id),
initial_delay);
// Another increment of "add events" serves to invalidate any pending
@@ -598,26 +550,27 @@ void StackSamplingProfiler::SamplingThread::AddCollectionTask(
}
}
-void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
+void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(
+ int collection_id) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- auto found = active_collections_.find(id);
+ auto found = active_collections_.find(collection_id);
if (found == active_collections_.end())
return;
// Remove |collection| from |active_collections_|.
std::unique_ptr<CollectionContext> collection = std::move(found->second);
- size_t count = active_collections_.erase(id);
+ size_t count = active_collections_.erase(collection_id);
DCHECK_EQ(1U, count);
FinishCollection(collection.get());
- ScheduleShutdownIfIdle();
}
-void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
+void StackSamplingProfiler::SamplingThread::RecordSampleTask(
+ int collection_id) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- auto found = active_collections_.find(id);
+ auto found = active_collections_.find(collection_id);
// The task won't be found if it has been stopped.
if (found == active_collections_.end())
@@ -625,34 +578,43 @@ void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
CollectionContext* collection = found->second.get();
- // Handle first-run with no "next time".
- if (collection->next_sample_time == Time())
+ // If this is the first sample, the collection params need to be filled.
+ if (collection->sample_count == 0) {
+ collection->profile_start_time = Time::Now();
collection->next_sample_time = Time::Now();
+ collection->native_sampler->ProfileRecordingStarting();
+ }
- // Do the collection of a single sample.
- RecordSample(collection);
-
- // Update the time of the next sample recording.
- const bool collection_finished = !UpdateNextSampleTime(collection);
- if (!collection_finished) {
+ // Record a single sample.
+ collection->profile_builder->OnSampleCompleted(
+ collection->native_sampler->RecordStackFrames(
+ stack_buffer_.get(), collection->profile_builder.get()));
+
+ // Schedule the next sample recording if there is one.
+ if (++collection->sample_count < collection->params.samples_per_profile) {
+ // This will keep a consistent average interval between samples but will
+ // result in a constant series of acquisitions, thus nearly locking out the

+ // target thread, if the interval is smaller than the time it takes to
+ // actually acquire the sample. Anything sampling that quickly is going
+ // to be a problem anyway so don't worry about it.
+ collection->next_sample_time += collection->params.sampling_interval;
bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
FROM_HERE,
- BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this), id),
+ BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
+ collection_id),
std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
DCHECK(success);
return;
}
- // Take ownership of |collection| and remove it from the map. If collection is
- // to be restarted, a new collection task will be added below.
+ // Take ownership of |collection| and remove it from the map.
std::unique_ptr<CollectionContext> owned_collection =
std::move(found->second);
- size_t count = active_collections_.erase(id);
+ size_t count = active_collections_.erase(collection_id);
DCHECK_EQ(1U, count);
// All capturing has completed so finish the collection.
FinishCollection(collection);
- ScheduleShutdownIfIdle();
}
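The rescheduling logic above keeps a fixed cadence by advancing next_sample_time by the nominal interval and then posting the task with only the remaining delay, clamped at zero, so one slow sample does not push every later sample out. A stand-alone sketch of the same drift-free scheduling arithmetic using std::chrono (illustrative only; the diff uses base::Time and PostDelayedTask):

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  using Clock = std::chrono::steady_clock;
  const auto interval = std::chrono::milliseconds(100);
  auto next_sample_time = Clock::now();

  for (int sample = 0; sample < 5; ++sample) {
    // Pretend taking the sample costs a variable amount of time.
    std::this_thread::sleep_for(std::chrono::milliseconds(30));

    // Advance by the nominal interval (not "now + interval"), then wait only
    // for whatever of that interval is left. If sampling overran the interval,
    // the delay clamps to zero and the next sample runs immediately.
    next_sample_time += interval;
    auto delay =
        std::max(next_sample_time - Clock::now(), Clock::duration::zero());
    std::this_thread::sleep_for(delay);
    std::printf("sample %d done\n", sample);
  }
}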
void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
@@ -689,27 +651,6 @@ void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
stack_buffer_.reset();
}
-bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
- CollectionContext* collection) {
- // This will keep a consistent average interval between samples but will
- // result in constant series of acquisitions, thus nearly locking out the
- // target thread, if the interval is smaller than the time it takes to
- // actually acquire the sample. Anything sampling that quickly is going
- // to be a problem anyway so don't worry about it.
- if (++collection->sample < collection->params.samples_per_burst) {
- collection->next_sample_time += collection->params.sampling_interval;
- return true;
- }
-
- if (++collection->burst < collection->params.bursts) {
- collection->sample = 0;
- collection->next_sample_time += collection->params.burst_interval;
- return true;
- }
-
- return false;
-}
-
void StackSamplingProfiler::SamplingThread::CleanUp() {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
@@ -725,12 +666,6 @@ void StackSamplingProfiler::SamplingThread::CleanUp() {
// static
void StackSamplingProfiler::TestAPI::Reset() {
SamplingThread::TestAPI::Reset();
- ResetAnnotations();
-}
-
-// static
-void StackSamplingProfiler::TestAPI::ResetAnnotations() {
- subtle::NoBarrier_Store(&process_milestones_, 0u);
}
// static
@@ -749,30 +684,30 @@ void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
}
-subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
-
StackSamplingProfiler::StackSamplingProfiler(
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate)
- : StackSamplingProfiler(base::PlatformThread::CurrentId(),
+ : StackSamplingProfiler(PlatformThread::CurrentId(),
params,
- callback,
+ std::move(profile_builder),
test_delegate) {}
StackSamplingProfiler::StackSamplingProfiler(
PlatformThreadId thread_id,
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate)
: thread_id_(thread_id),
params_(params),
- completed_callback_(callback),
+ profile_builder_(std::move(profile_builder)),
// The event starts "signaled" so code knows it's safe to start the thread
// and "manual" so that it can be waited on in multiple places.
profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
- profiler_id_(NULL_PROFILER_ID),
- test_delegate_(test_delegate) {}
+ profiler_id_(kNullProfilerId),
+ test_delegate_(test_delegate) {
+ DCHECK(profile_builder_);
+}
StackSamplingProfiler::~StackSamplingProfiler() {
// Stop returns immediately but the shutdown runs asynchronously. There is a
@@ -794,12 +729,13 @@ StackSamplingProfiler::~StackSamplingProfiler() {
}
void StackSamplingProfiler::Start() {
- if (completed_callback_.is_null())
- return;
+ // Multiple calls to Start() for a single StackSamplingProfiler object are
+ // not allowed. If profile_builder_ is nullptr, Start() has already been
+ // called.
+ DCHECK(profile_builder_);
std::unique_ptr<NativeStackSampler> native_sampler =
- NativeStackSampler::Create(thread_id_, &RecordAnnotations,
- test_delegate_);
+ NativeStackSampler::Create(thread_id_, test_delegate_);
if (!native_sampler)
return;
@@ -816,34 +752,17 @@ void StackSamplingProfiler::Start() {
profiling_inactive_.Wait();
profiling_inactive_.Reset();
- DCHECK_EQ(NULL_PROFILER_ID, profiler_id_);
+ DCHECK_EQ(kNullProfilerId, profiler_id_);
profiler_id_ = SamplingThread::GetInstance()->Add(
std::make_unique<SamplingThread::CollectionContext>(
- SamplingThread::CollectionContext::next_profiler_id.GetNext(),
- thread_id_, params_, completed_callback_, &profiling_inactive_,
- std::move(native_sampler)));
- DCHECK_NE(NULL_PROFILER_ID, profiler_id_);
+ thread_id_, params_, &profiling_inactive_, std::move(native_sampler),
+ std::move(profile_builder_)));
+ DCHECK_NE(kNullProfilerId, profiler_id_);
}
void StackSamplingProfiler::Stop() {
SamplingThread::GetInstance()->Remove(profiler_id_);
- profiler_id_ = NULL_PROFILER_ID;
-}
-
-// static
-void StackSamplingProfiler::SetProcessMilestone(int milestone) {
- DCHECK_LE(0, milestone);
- DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
- DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
- ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
-}
-
-// static
-void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
- // The code inside this method must not do anything that could acquire a
- // mutex, including allocating memory (which includes LOG messages) because
- // that mutex could be held by a stopped thread, thus resulting in deadlock.
- sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_);
+ profiler_id_ = kNullProfilerId;
}
// StackSamplingProfiler::Frame global functions ------------------------------
@@ -851,7 +770,7 @@ void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
bool operator==(const StackSamplingProfiler::Module& a,
const StackSamplingProfiler::Module& b) {
return a.base_address == b.base_address && a.id == b.id &&
- a.filename == b.filename;
+ a.filename == b.filename;
}
bool operator==(const StackSamplingProfiler::Sample& a,
@@ -866,25 +785,24 @@ bool operator!=(const StackSamplingProfiler::Sample& a,
bool operator<(const StackSamplingProfiler::Sample& a,
const StackSamplingProfiler::Sample& b) {
- if (a.process_milestones < b.process_milestones)
- return true;
- if (a.process_milestones > b.process_milestones)
- return false;
+ if (a.process_milestones != b.process_milestones)
+ return a.process_milestones < b.process_milestones;
return a.frames < b.frames;
}
-bool operator==(const StackSamplingProfiler::Frame &a,
- const StackSamplingProfiler::Frame &b) {
+bool operator==(const StackSamplingProfiler::Frame& a,
+ const StackSamplingProfiler::Frame& b) {
return a.instruction_pointer == b.instruction_pointer &&
- a.module_index == b.module_index;
+ a.module_index == b.module_index;
}
-bool operator<(const StackSamplingProfiler::Frame &a,
- const StackSamplingProfiler::Frame &b) {
- return (a.module_index < b.module_index) ||
- (a.module_index == b.module_index &&
- a.instruction_pointer < b.instruction_pointer);
+bool operator<(const StackSamplingProfiler::Frame& a,
+ const StackSamplingProfiler::Frame& b) {
+ if (a.module_index != b.module_index)
+ return a.module_index < b.module_index;
+
+ return a.instruction_pointer < b.instruction_pointer;
}
} // namespace base
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index 2f9ade55eea..e43349a8fe0 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -7,13 +7,12 @@
#include <stddef.h>
+#include <map>
#include <memory>
#include <string>
#include <vector>
-#include "base/atomicops.h"
#include "base/base_export.h"
-#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/strings/string16.h"
@@ -23,7 +22,9 @@
namespace base {
-class NativeStackSampler;
+// Identifies an unknown module.
+BASE_EXPORT extern const size_t kUnknownModuleIndex;
+
class NativeStackSamplerTestDelegate;
// StackSamplingProfiler periodically stops a thread to sample its stack, for
@@ -35,34 +36,24 @@ class NativeStackSamplerTestDelegate;
//
// // Create and customize params as desired.
// base::StackSamplingProfiler::SamplingParams params;
-// // Any thread's ID may be passed as the target.
-// base::StackSamplingProfiler profiler(base::PlatformThread::CurrentId()),
-// params);
//
-// // Or, to process the profiles within Chrome rather than via UMA, use a
-// // custom completed callback:
-// base::StackStackSamplingProfiler::CompletedCallback
-// thread_safe_callback = ...;
+// // To process the profiles, use a custom ProfileBuilder subclass:
+// class SubProfileBuilder :
+// public base::StackSamplingProfiler::ProfileBuilder {...}
// base::StackSamplingProfiler profiler(base::PlatformThread::CurrentId(),
-// params, thread_safe_callback);
+// params, std::make_unique<SubProfileBuilder>(...));
//
// profiler.Start();
// // ... work being done on the target thread here ...
// profiler.Stop(); // optional, stops collection before complete per params
//
-// The default SamplingParams causes stacks to be recorded in a single burst at
-// a 10Hz interval for a total of 30 seconds. All of these parameters may be
+// The default SamplingParams causes stacks to be recorded in a single profile
+// at a 10Hz interval for a total of 30 seconds. All of these parameters may be
// altered as desired.
//
-// When all call stack profiles are complete, or the profiler is stopped, the
-// completed callback is called from a thread created by the profiler with the
-// collected profiles.
-//
-// The results of the profiling are passed to the completed callback and consist
-// of a vector of CallStackProfiles. Each CallStackProfile corresponds to a
-// burst as specified in SamplingParams and contains a set of Samples and
-// Modules. One Sample corresponds to a single recorded stack, and the Modules
-// record those modules associated with the recorded stack frames.
+// When a call stack profile is complete, or the profiler is stopped,
+// ProfileBuilder's OnProfileCompleted function is called from a thread created
+// by the profiler.
class BASE_EXPORT StackSamplingProfiler {
public:
// Module represents the module (DLL or exe) corresponding to a stack frame.
@@ -89,11 +80,39 @@ class BASE_EXPORT StackSamplingProfiler {
FilePath filename;
};
+ // InternalModule represents the module (DLL or exe) and its validity state.
+ // Different from Module, it has an additional field "is_valid".
+ //
+ // This struct is only used for sampling data transfer from NativeStackSampler
+ // to ProfileBuilder.
+ struct BASE_EXPORT InternalModule {
+ InternalModule();
+ InternalModule(uintptr_t base_address,
+ const std::string& id,
+ const FilePath& filename);
+ ~InternalModule();
+
+ // Points to the base address of the module.
+ uintptr_t base_address;
+
+ // An opaque binary string that uniquely identifies a particular program
+ // version with high probability. This is parsed from headers of the loaded
+ // module.
+ // For binaries generated by GNU tools:
+ // Contents of the .note.gnu.build-id field.
+ // On Windows:
+ // GUID + AGE in the debug image headers of a module.
+ std::string id;
+
+ // The filename of the module.
+ FilePath filename;
+
+ // Whether the module is valid.
+ bool is_valid;
+ };
+
// Frame represents an individual sampled stack frame with module information.
struct BASE_EXPORT Frame {
- // Identifies an unknown module.
- static const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
-
Frame(uintptr_t instruction_pointer, size_t module_index);
~Frame();
@@ -108,6 +127,23 @@ class BASE_EXPORT StackSamplingProfiler {
size_t module_index;
};
+ // InternalFrame represents an individual sampled stack frame with full module
+ // information. This is different from Frame, which contains only a module index.
+ //
+ // This struct is only used for sampling data transfer from NativeStackSampler
+ // to ProfileBuilder.
+ struct BASE_EXPORT InternalFrame {
+ InternalFrame(uintptr_t instruction_pointer,
+ InternalModule internal_module);
+ ~InternalFrame();
+
+ // The sampled instruction pointer within the function.
+ uintptr_t instruction_pointer;
+
+ // The module information.
+ InternalModule internal_module;
+ };
+
// Sample represents a set of stack frames with some extra information.
struct BASE_EXPORT Sample {
Sample();
@@ -157,24 +193,15 @@ class BASE_EXPORT StackSamplingProfiler {
DISALLOW_ASSIGN(CallStackProfile);
};
- using CallStackProfiles = std::vector<CallStackProfile>;
-
// Represents parameters that configure the sampling.
struct BASE_EXPORT SamplingParams {
// Time to delay before first samples are taken.
TimeDelta initial_delay = TimeDelta::FromMilliseconds(0);
- // Number of sampling bursts to perform.
- int bursts = 1;
-
- // Interval between sampling bursts. This is the desired duration from the
- // start of one burst to the start of the next burst.
- TimeDelta burst_interval = TimeDelta::FromSeconds(10);
-
- // Number of samples to record per burst.
- int samples_per_burst = 300;
+ // Number of samples to record per profile.
+ int samples_per_profile = 300;
- // Interval between samples during a sampling burst. This is the desired
+ // Interval between samples during a sampling profile. This is the desired
// duration from the start of one sample to the start of the next sample.
TimeDelta sampling_interval = TimeDelta::FromMilliseconds(100);
};
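Given the trimmed-down parameter set, the defaults shown above (300 samples at a 100 ms interval, i.e. 10 Hz for 30 seconds) can be spelled out explicitly; a small illustrative configuration:

base::StackSamplingProfiler::SamplingParams params;
params.initial_delay = base::TimeDelta::FromMilliseconds(0);
params.sampling_interval = base::TimeDelta::FromMilliseconds(100);  // 10 Hz
params.samples_per_profile = 300;  // 300 * 100 ms = 30 seconds of sampling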
@@ -189,9 +216,6 @@ class BASE_EXPORT StackSamplingProfiler {
// so that tests don't inherit state from previous tests.
static void Reset();
- // Resets internal annotations (like process phase) to initial values.
- static void ResetAnnotations();
-
// Returns whether the sampling thread is currently running or not.
static bool IsSamplingThreadRunning();
@@ -211,41 +235,58 @@ class BASE_EXPORT StackSamplingProfiler {
bool simulate_intervening_start);
};
- // The callback type used to collect completed profiles. The passed |profiles|
- // are move-only. Other threads, including the UI thread, may block on
- // callback completion so this should run as quickly as possible.
- //
- // IMPORTANT NOTE: The callback is invoked on a thread the profiler
- // constructs, rather than on the thread used to construct the profiler and
- // set the callback, and thus the callback must be callable on any thread. For
- // threads with message loops that create StackSamplingProfilers, posting a
- // task to the message loop with the moved (i.e. std::move) profiles is the
- // thread-safe callback implementation.
- using CompletedCallback = Callback<void(CallStackProfiles)>;
-
- // Creates a profiler for the CURRENT thread that sends completed profiles
- // to |callback|. An optional |test_delegate| can be supplied by tests.
- // The caller must ensure that this object gets destroyed before the current
- // thread exits.
+ // The ProfileBuilder interface allows the user to record profile information
+ // on the fly in whatever format is desired. Functions are invoked by the
+ // profiler on its own thread so must not block or perform expensive
+ // operations.
+ class BASE_EXPORT ProfileBuilder {
+ public:
+ ProfileBuilder() = default;
+ virtual ~ProfileBuilder() = default;
+
+ // Records metadata associated with the sample.
+ // The code implementing this method must not do anything that could acquire
+ // a mutex, including allocating memory (which includes LOG messages)
+ // because that mutex could be held by a stopped thread, thus resulting in
+ // deadlock.
+ virtual void RecordAnnotations() = 0;
+
+ // Records a new set of internal frames. Invoked when a sample has been
+ // captured.
+ virtual void OnSampleCompleted(
+ std::vector<InternalFrame> internal_frames) = 0;
+
+ // Finishes the profile construction with |profile_duration| and
+ // |sampling_period|. Invoked when sampling a profile completes.
+ virtual void OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileBuilder);
+ };
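A minimal ProfileBuilder implementation might simply accumulate the frame sets it is handed; the class and member names in this sketch are illustrative only, not part of the interface:

// Illustrative sketch of a ProfileBuilder that stores each sample's frames.
// All methods run on the profiler's thread, so they must stay cheap and must
// not take locks or allocate in RecordAnnotations().
class FrameCollectingBuilder
    : public base::StackSamplingProfiler::ProfileBuilder {
 public:
  using InternalFrame = base::StackSamplingProfiler::InternalFrame;

  void RecordAnnotations() override {}  // No annotations collected here.

  void OnSampleCompleted(std::vector<InternalFrame> internal_frames) override {
    frame_sets_.push_back(std::move(internal_frames));
  }

  void OnProfileCompleted(base::TimeDelta profile_duration,
                          base::TimeDelta sampling_period) override {
    profile_duration_ = profile_duration;
    sampling_period_ = sampling_period;
  }

 private:
  std::vector<std::vector<InternalFrame>> frame_sets_;
  base::TimeDelta profile_duration_;
  base::TimeDelta sampling_period_;
};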
+
+ // Creates a profiler for the CURRENT thread. An optional |test_delegate| can
+ // be supplied by tests. The caller must ensure that this object gets
+ // destroyed before the current thread exits.
StackSamplingProfiler(
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate = nullptr);
- // Creates a profiler for ANOTHER thread that sends completed profiles to
- // |callback|. An optional |test_delegate| can be supplied by tests.
+ // Creates a profiler for ANOTHER thread. An optional |test_delegate| can be
+ // supplied by tests.
//
// IMPORTANT: The caller must ensure that the thread being sampled does not
// exit before this object gets destructed or Bad Things(tm) may occur.
StackSamplingProfiler(
PlatformThreadId thread_id,
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate = nullptr);
// Stops any profiling currently taking place before destroying the profiler.
- // This will block until the callback has been run if profiling has started
- // but not already finished.
+ // This will block until profile_builder_'s OnProfileCompleted function has
+ // executed if profiling has started but not already finished.
~StackSamplingProfiler();
// Initializes the profiler and starts sampling. Might block on a
@@ -254,21 +295,13 @@ class BASE_EXPORT StackSamplingProfiler {
void Start();
// Stops the profiler and any ongoing sampling. This method will return
- // immediately with the callback being run asynchronously. At most one
- // more stack sample will be taken after this method returns. Calling this
- // function is optional; if not invoked profiling terminates when all the
- // profiling bursts specified in the SamplingParams are completed or the
- // profiler object is destroyed, whichever occurs first.
+ // immediately with the profile_builder_'s OnProfileCompleted function being
+ // run asynchronously. At most one more stack sample will be taken after this
+ // method returns. Calling this function is optional; if not invoked profiling
+ // terminates when all the profiling samples specified in the SamplingParams
+ // are completed or the profiler object is destroyed, whichever occurs first.
void Stop();
- // Set the current system state that is recorded with each captured stack
- // frame. This is thread-safe so can be called from anywhere. The parameter
- // value should be from an enumeration of the appropriate type with values
- // ranging from 0 to 31, inclusive. This sets bits within Sample field of
- // |process_milestones|. The actual meanings of these bits are defined
- // (globally) by the caller(s).
- static void SetProcessMilestone(int milestone);
-
private:
friend class TestAPI;
@@ -276,31 +309,21 @@ class BASE_EXPORT StackSamplingProfiler {
// the target thread.
class SamplingThread;
- // Adds annotations to a Sample.
- static void RecordAnnotations(Sample* sample);
-
- // This global variables holds the current system state and is recorded with
- // every captured sample, done on a separate thread which is why updates to
- // this must be atomic. A PostTask to move the the updates to that thread
- // would skew the timing and a lock could result in deadlock if the thread
- // making a change was also being profiled and got stopped.
- static subtle::Atomic32 process_milestones_;
-
// The thread whose stack will be sampled.
PlatformThreadId thread_id_;
const SamplingParams params_;
- const CompletedCallback completed_callback_;
+ // Receives the sampling data and builds a CallStackProfile. The ownership of
+ // this object will be transferred to the sampling thread when thread sampling
+ // starts.
+ std::unique_ptr<ProfileBuilder> profile_builder_;
// This starts "signaled", is reset when sampling begins, and is signaled
- // when that sampling is complete and the callback done.
+ // when that sampling is complete and the profile_builder_'s
+ // OnProfileCompleted function has executed.
WaitableEvent profiling_inactive_;
- // Object that does the native sampling. This is created during construction
- // and later passed to the sampling thread when profiling is started.
- std::unique_ptr<NativeStackSampler> native_sampler_;
-
// An ID uniquely identifying this profiler to the sampling thread. This
// will be an internal "null" value when no collection has been started.
int profiler_id_;
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 8fc25c92c19..b0f883624f6 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -12,6 +12,7 @@
#include <vector>
#include "base/bind.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/files/file_util.h"
#include "base/macros.h"
@@ -22,10 +23,12 @@
#include "base/profiler/stack_sampling_profiler.h"
#include "base/run_loop.h"
#include "base/scoped_native_library.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/bind_test_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "base/time/time.h"
@@ -61,11 +64,13 @@ namespace base {
using SamplingParams = StackSamplingProfiler::SamplingParams;
using Frame = StackSamplingProfiler::Frame;
-using Frames = std::vector<StackSamplingProfiler::Frame>;
+using Frames = std::vector<Frame>;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using InternalFrames = std::vector<InternalFrame>;
+using InternalFrameSets = std::vector<std::vector<InternalFrame>>;
using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
using Sample = StackSamplingProfiler::Sample;
-using CallStackProfile = StackSamplingProfiler::CallStackProfile;
-using CallStackProfiles = StackSamplingProfiler::CallStackProfiles;
namespace {
@@ -92,8 +97,9 @@ struct StackConfiguration {
// Signature for a target function that is expected to appear in the stack. See
// SignalAndWaitUntilSignaled() below. The return value should be a program
// counter pointer near the end of the function.
-using TargetFunction = const void*(*)(WaitableEvent*, WaitableEvent*,
- const StackConfiguration*);
+using TargetFunction = const void* (*)(WaitableEvent*,
+ WaitableEvent*,
+ const StackConfiguration*);
// A thread to target for profiling, whose stack is guaranteed to contain
// SignalAndWaitUntilSignaled() when coordinated with the main thread.
@@ -149,7 +155,7 @@ class TargetThread : public PlatformThread::Delegate {
};
// Callback function to be provided when calling through the other library.
- static void OtherLibraryCallback(void *arg);
+ static void OtherLibraryCallback(void* arg);
// Returns the current program counter, or a value very close to it.
static const void* GetProgramCounter();
@@ -237,18 +243,15 @@ NOINLINE const void* TargetThread::CallThroughOtherLibrary(
const StackConfiguration* stack_config) {
if (stack_config) {
// A function whose arguments are a function accepting void*, and a void*.
- using InvokeCallbackFunction = void(*)(void (*)(void*), void*);
+ using InvokeCallbackFunction = void (*)(void (*)(void*), void*);
EXPECT_TRUE(stack_config->library);
InvokeCallbackFunction function = reinterpret_cast<InvokeCallbackFunction>(
GetFunctionPointerFromNativeLibrary(stack_config->library,
"InvokeCallbackFunction"));
EXPECT_TRUE(function);
- TargetFunctionArgs args = {
- thread_started_event,
- finish_event,
- stack_config
- };
+ TargetFunctionArgs args = {thread_started_event, finish_event,
+ stack_config};
(*function)(&OtherLibraryCallback, &args);
}
@@ -258,7 +261,7 @@ NOINLINE const void* TargetThread::CallThroughOtherLibrary(
}
// static
-void TargetThread::OtherLibraryCallback(void *arg) {
+void TargetThread::OtherLibraryCallback(void* arg) {
const TargetFunctionArgs* args = static_cast<TargetFunctionArgs*>(arg);
SignalAndWaitUntilSignaled(args->thread_started_event, args->finish_event,
args->stack_config);
@@ -277,6 +280,92 @@ NOINLINE const void* TargetThread::GetProgramCounter() {
#endif
}
+// Profile consists of a set of internal frame sets and other sampling
+// information.
+struct Profile {
+ Profile() = default;
+ Profile(Profile&& other) = default;
+ Profile(const InternalFrameSets& frame_sets,
+ int annotation_count,
+ TimeDelta profile_duration,
+ TimeDelta sampling_period);
+
+ ~Profile() = default;
+
+ Profile& operator=(Profile&& other) = default;
+
+ // The collected internal frame sets.
+ InternalFrameSets frame_sets;
+
+ // The number of invocations of RecordAnnotations().
+ int annotation_count;
+
+ // Duration of this profile.
+ TimeDelta profile_duration;
+
+ // Time between samples.
+ TimeDelta sampling_period;
+};
+
+Profile::Profile(const InternalFrameSets& frame_sets,
+ int annotation_count,
+ TimeDelta profile_duration,
+ TimeDelta sampling_period)
+ : frame_sets(frame_sets),
+ annotation_count(annotation_count),
+ profile_duration(profile_duration),
+ sampling_period(sampling_period) {}
+
+// The callback type used to collect a profile. The passed Profile is move-only.
+// Other threads, including the UI thread, may block on callback completion so
+// this should run as quickly as possible.
+using ProfileCompletedCallback = Callback<void(Profile)>;
+
+// TestProfileBuilder collects internal frames produced by the profiler.
+class TestProfileBuilder : public StackSamplingProfiler::ProfileBuilder {
+ public:
+ TestProfileBuilder(const ProfileCompletedCallback& callback);
+
+ ~TestProfileBuilder() override;
+
+ // StackSamplingProfiler::ProfileBuilder:
+ void RecordAnnotations() override;
+ void OnSampleCompleted(InternalFrames internal_frames) override;
+ void OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) override;
+
+ private:
+ // The sets of internal frames recorded.
+ std::vector<InternalFrames> frame_sets_;
+
+ // The number of invocations of RecordAnnotations().
+ int annotation_count_ = 0;
+
+ // Callback made when sampling a profile completes.
+ const ProfileCompletedCallback callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestProfileBuilder);
+};
+
+TestProfileBuilder::TestProfileBuilder(const ProfileCompletedCallback& callback)
+ : callback_(callback) {}
+
+TestProfileBuilder::~TestProfileBuilder() = default;
+
+void TestProfileBuilder::RecordAnnotations() {
+ ++annotation_count_;
+}
+
+void TestProfileBuilder::OnSampleCompleted(InternalFrames internal_frames) {
+ frame_sets_.push_back(std::move(internal_frames));
+}
+
+void TestProfileBuilder::OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) {
+ callback_.Run(Profile(frame_sets_, annotation_count_, profile_duration,
+ sampling_period));
+}
+
// Loads the other library, which defines a function to be called in the
// WITH_OTHER_LIBRARY configuration.
NativeLibrary LoadOtherLibrary() {
@@ -310,7 +399,7 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
HMODULE module_handle;
// Keep trying to get the module handle until the call fails.
while (::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
- GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCTSTR>(module_base_address),
&module_handle) ||
::GetLastError() != ERROR_MOD_NOT_FOUND) {
@@ -323,22 +412,6 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
#endif
}
-// Called on the profiler thread when complete, to collect profiles.
-void SaveProfiles(CallStackProfiles* profiles,
- CallStackProfiles pending_profiles) {
- *profiles = std::move(pending_profiles);
-}
-
-// Called on the profiler thread when complete. Collects profiles produced by
-// the profiler, and signals an event to allow the main thread to know that that
-// the profiler is done.
-void SaveProfilesAndSignalEvent(CallStackProfiles* profiles,
- WaitableEvent* event,
- CallStackProfiles pending_profiles) {
- *profiles = std::move(pending_profiles);
- event->Signal();
-}
-
// Executes the function with the target thread running and executing within
// SignalAndWaitUntilSignaled(). Performs all necessary target thread startup
// and shutdown work before and afterward.
@@ -371,14 +444,16 @@ struct TestProfilerInfo {
WaitableEvent::InitialState::NOT_SIGNALED),
profiler(thread_id,
params,
- Bind(&SaveProfilesAndSignalEvent,
- Unretained(&profiles),
- Unretained(&completed)),
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([this](Profile result_profile) {
+ profile = std::move(result_profile);
+ completed.Signal();
+ })),
delegate) {}
// The order here is important to ensure objects being referenced don't get
// destructed until after the objects referencing them.
- CallStackProfiles profiles;
+ Profile profile;
WaitableEvent completed;
StackSamplingProfiler profiler;
@@ -401,21 +476,22 @@ std::vector<std::unique_ptr<TestProfilerInfo>> CreateProfilers(
return profilers;
}
-// Captures profiles as specified by |params| on the TargetThread, and returns
-// them in |profiles|. Waits up to |profiler_wait_time| for the profiler to
-// complete.
-void CaptureProfiles(const SamplingParams& params, TimeDelta profiler_wait_time,
- CallStackProfiles* profiles) {
- WithTargetThread([&params, profiles,
+// Captures internal frames as specified by |params| on the TargetThread, and
+// returns them. Waits up to |profiler_wait_time| for the profiler to complete.
+InternalFrameSets CaptureFrameSets(const SamplingParams& params,
+ TimeDelta profiler_wait_time) {
+ InternalFrameSets frame_sets;
+ WithTargetThread([&params, &frame_sets,
profiler_wait_time](PlatformThreadId target_thread_id) {
TestProfilerInfo info(target_thread_id, params);
info.profiler.Start();
info.completed.TimedWait(profiler_wait_time);
info.profiler.Stop();
info.completed.Wait();
-
- *profiles = std::move(info.profiles);
+ frame_sets = std::move(info.profile.frame_sets);
});
+
+ return frame_sets;
}
// Waits for one of multiple samplings to complete.
@@ -457,38 +533,39 @@ const void* MaybeFixupFunctionAddressForILT(const void* function_address) {
// Searches through the frames in |sample|, returning an iterator to the first
// frame that has an instruction pointer within |target_function|. Returns
// sample.end() if no such frames are found.
-Frames::const_iterator FindFirstFrameWithinFunction(
- const Sample& sample,
+InternalFrames::const_iterator FindFirstFrameWithinFunction(
+ const InternalFrames& frames,
TargetFunction target_function) {
- uintptr_t function_start = reinterpret_cast<uintptr_t>(
- MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- target_function)));
+ uintptr_t function_start =
+ reinterpret_cast<uintptr_t>(MaybeFixupFunctionAddressForILT(
+ reinterpret_cast<const void*>(target_function)));
uintptr_t function_end =
reinterpret_cast<uintptr_t>(target_function(nullptr, nullptr, nullptr));
- for (auto it = sample.frames.begin(); it != sample.frames.end(); ++it) {
- if ((it->instruction_pointer >= function_start) &&
- (it->instruction_pointer <= function_end))
+ for (auto it = frames.begin(); it != frames.end(); ++it) {
+ if (it->instruction_pointer >= function_start &&
+ it->instruction_pointer <= function_end) {
return it;
+ }
}
- return sample.frames.end();
+ return frames.end();
}
// Formats a sample into a string that can be output for test diagnostics.
-std::string FormatSampleForDiagnosticOutput(
- const Sample& sample,
- const std::vector<Module>& modules) {
+std::string FormatSampleForDiagnosticOutput(const InternalFrames& frames) {
std::string output;
- for (const Frame& frame : sample.frames) {
+ for (const auto& frame : frames) {
output += StringPrintf(
"0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
- modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
+ frame.internal_module.filename.AsUTF8Unsafe().c_str());
}
return output;
}
// Returns a duration that is longer than the test timeout. We would use
// TimeDelta::Max() but https://crbug.com/465948.
-TimeDelta AVeryLongTimeDelta() { return TimeDelta::FromDays(1); }
+TimeDelta AVeryLongTimeDelta() {
+ return TimeDelta::FromDays(1);
+}
// Tests the scenario where the library is unloaded after copying the stack, but
// before walking it. If |wait_until_unloaded| is true, ensures that the
@@ -520,12 +597,11 @@ void TestLibraryUnload(bool wait_until_unloaded) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
NativeLibrary other_library = LoadOtherLibrary();
TargetThread target_thread(StackConfiguration(
- StackConfiguration::WITH_OTHER_LIBRARY,
- other_library));
+ StackConfiguration::WITH_OTHER_LIBRARY, other_library));
PlatformThreadHandle target_thread_handle;
EXPECT_TRUE(PlatformThread::Create(0, &target_thread, &target_thread_handle));
@@ -535,18 +611,22 @@ void TestLibraryUnload(bool wait_until_unloaded) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- std::vector<CallStackProfile> profiles;
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
+ Profile profile;
+
WaitableEvent stack_copied(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEvent start_stack_walk(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
StackCopiedSignaler test_delegate(&stack_copied, &start_stack_walk,
wait_until_unloaded);
- StackSamplingProfiler profiler(target_thread.id(), params, callback,
- &test_delegate);
+ StackSamplingProfiler profiler(
+ target_thread.id(), params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_thread_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })),
+ &test_delegate);
profiler.Start();
@@ -568,58 +648,57 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// on that event.
start_stack_walk.Signal();
- // Wait for the sampling thread to complete and fill out |profiles|.
+ // Wait for the sampling thread to complete and fill out |profile|.
sampling_thread_completed.Wait();
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
if (wait_until_unloaded) {
// The stack should look like this, resulting in one frame after
- // SignalAndWaitUntilSignaled. The frame in the now-unloaded library is not
- // recorded since we can't get module information.
+ // SignalAndWaitUntilSignaled. The frame in the now-unloaded library is
+ // not recorded since we can't get module information.
//
// ... WaitableEvent and system frames ...
// TargetThread::SignalAndWaitUntilSignaled
// TargetThread::OtherLibraryCallback
- EXPECT_EQ(2, sample.frames.end() - end_frame)
+ EXPECT_EQ(2, frames.end() - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
} else {
// We didn't wait for the asynchronous unloading to complete, so the results
// are non-deterministic: if the library finished unloading we should have
// the same stack as |wait_until_unloaded|, if not we should have the full
// stack. The important thing is that we should not crash.
- if (sample.frames.end() - end_frame == 2) {
+ if (frames.end() - end_frame == 2) {
// This is the same case as |wait_until_unloaded|.
return;
}
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.frames.end())
+ InternalFrames::const_iterator other_library_frame =
+ FindFirstFrameWithinFunction(frames,
+ &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// The stack should look like this, resulting in three frames between
// SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
@@ -631,7 +710,7 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// TargetThread::CallThroughOtherLibrary
EXPECT_EQ(3, other_library_frame - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
}
}
@@ -656,8 +735,9 @@ class StackSamplingProfilerTest : public testing::Test {
} // namespace
-// Checks that the basic expected information is present in a sampled call stack
-// profile.
+// Checks that the basic expected information is present in sampled internal
+// frames.
+//
// macOS ASAN is not yet supported - crbug.com/718628.
#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
#define MAYBE_Basic Basic
@@ -667,67 +747,28 @@ class StackSamplingProfilerTest : public testing::Test {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Basic) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
-
- // Check that the profile and samples sizes are correct, and the module
- // indices are in range.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- EXPECT_EQ(params.sampling_interval, profile.sampling_period);
- const Sample& sample = profile.samples[0];
- EXPECT_EQ(0u, sample.process_milestones);
- for (const auto& frame : sample.frames) {
- ASSERT_GE(frame.module_index, 0u);
- ASSERT_LT(frame.module_index, profile.modules.size());
- }
+ params.samples_per_profile = 1;
+
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+
+ // Check that the size of the frame sets are correct.
+ ASSERT_EQ(1u, frame_sets.size());
+ const InternalFrames& frames = frame_sets[0];
+
+ // Check that all the modules are valid.
+ for (const auto& frame : frames)
+ EXPECT_TRUE(frame.internal_module.is_valid);
// Check that the stack contains a frame for
- // TargetThread::SignalAndWaitUntilSignaled() and that the frame has this
- // executable's module.
- Frames::const_iterator loc = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(loc != sample.frames.end())
+ // TargetThread::SignalAndWaitUntilSignaled().
+ InternalFrames::const_iterator loc = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(loc != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
- FilePath executable_path;
- EXPECT_TRUE(PathService::Get(FILE_EXE, &executable_path));
- EXPECT_EQ(executable_path,
- MakeAbsoluteFilePath(profile.modules[loc->module_index].filename));
-}
-
-// Checks that annotations are recorded in samples.
-PROFILER_TEST_F(StackSamplingProfilerTest, Annotations) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- // Check that a run picks up annotations.
- StackSamplingProfiler::SetProcessMilestone(1);
- std::vector<CallStackProfile> profiles1;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles1);
- ASSERT_EQ(1u, profiles1.size());
- const CallStackProfile& profile1 = profiles1[0];
- ASSERT_EQ(1u, profile1.samples.size());
- const Sample& sample1 = profile1.samples[0];
- EXPECT_EQ(1u << 1, sample1.process_milestones);
-
- // Run it a second time but with changed annotations. These annotations
- // should appear in the first acquired sample.
- StackSamplingProfiler::SetProcessMilestone(2);
- std::vector<CallStackProfile> profiles2;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles2);
- ASSERT_EQ(1u, profiles2.size());
- const CallStackProfile& profile2 = profiles2[0];
- ASSERT_EQ(1u, profile2.samples.size());
- const Sample& sample2 = profile2.samples[0];
- EXPECT_EQ(sample1.process_milestones | (1u << 2), sample2.process_milestones);
+ << FormatSampleForDiagnosticOutput(frames);
}
// Checks that the profiler handles stacks containing dynamically-allocated
@@ -741,71 +782,55 @@ PROFILER_TEST_F(StackSamplingProfilerTest, Annotations) {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Alloca) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
+ Profile profile;
WithTargetThread(
- [&params, &profiles](PlatformThreadId target_thread_id) {
+ [&params, &profile](PlatformThreadId target_thread_id) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_thread_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })));
profiler.Start();
sampling_thread_completed.Wait();
},
StackConfiguration(StackConfiguration::WITH_ALLOCA));
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// Check that the stack contains a frame for TargetThread::CallWithAlloca().
- Frames::const_iterator alloca_frame =
- FindFirstFrameWithinFunction(sample, &TargetThread::CallWithAlloca);
- ASSERT_TRUE(alloca_frame != sample.frames.end())
+ InternalFrames::const_iterator alloca_frame =
+ FindFirstFrameWithinFunction(frames, &TargetThread::CallWithAlloca);
+ ASSERT_TRUE(alloca_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(
reinterpret_cast<const void*>(&TargetThread::CallWithAlloca))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// These frames should be adjacent on the stack.
EXPECT_EQ(1, alloca_frame - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
-}
-
-// Checks that the expected number of profiles and samples are present in the
-// call stack profiles produced.
-PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilesAndSamples) {
- SamplingParams params;
- params.burst_interval = params.sampling_interval =
- TimeDelta::FromMilliseconds(0);
- params.bursts = 2;
- params.samples_per_burst = 3;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
-
- ASSERT_EQ(2u, profiles.size());
- EXPECT_EQ(3u, profiles[0].samples.size());
- EXPECT_EQ(3u, profiles[1].samples.size());
+ << FormatSampleForDiagnosticOutput(frames);
}
// Checks that a profiler can stop/destruct without ever having started.
@@ -813,15 +838,19 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopWithoutStarting) {
WithTargetThread([](PlatformThreadId target_thread_id) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- CallStackProfiles profiles;
+ Profile profile;
WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_completed.Signal();
+ })));
profiler.Stop(); // Constructed but never started.
EXPECT_FALSE(sampling_completed.IsSignaled());
@@ -860,13 +889,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
// whatever interval the thread wakes up.
params[0].initial_delay = TimeDelta::FromMilliseconds(10);
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 100000;
+ params[0].samples_per_profile = 100000;
params[1].initial_delay = TimeDelta::FromMilliseconds(10);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 100000;
+ params[1].samples_per_profile = 100000;
- SampleRecordedCounter samples_recorded[arraysize(params)];
+ SampleRecordedCounter samples_recorded[size(params)];
TestProfilerInfo profiler_info0(target_thread_id, params[0],
&samples_recorded[0]);
@@ -885,16 +914,16 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
// Ensure that the first sampler can be safely stopped while the second
// continues to run. The stopped first profiler will still have a
- // PerformCollectionTask pending that will do nothing when executed because
- // the collection will have been removed by Stop().
+ // RecordSampleTask pending that will do nothing when executed because the
+ // collection will have been removed by Stop().
profiler_info0.profiler.Stop();
profiler_info0.completed.Wait();
size_t count0 = samples_recorded[0].Get();
size_t count1 = samples_recorded[1].Get();
// Waiting for the second sampler to collect a couple samples ensures that
- // the pending PerformCollectionTask for the first has executed because
- // tasks are always ordered by their next scheduled time.
+ // the pending RecordSampleTask for the first has executed because tasks are
+ // always ordered by their next scheduled time.
while (samples_recorded[1].Get() < count1 + 2)
PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
@@ -903,36 +932,20 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
});
}
-// Checks that no call stack profiles are captured if the profiling is stopped
+// Checks that no internal frames are captured if the profiling is stopped
// during the initial delay.
PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInitialDelay) {
SamplingParams params;
params.initial_delay = TimeDelta::FromSeconds(60);
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, TimeDelta::FromMilliseconds(0), &profiles);
-
- EXPECT_TRUE(profiles.empty());
-}
-
-// Checks that the single completed call stack profile is captured if the
-// profiling is stopped between bursts.
-PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterBurstInterval) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.burst_interval = TimeDelta::FromSeconds(60);
- params.bursts = 2;
- params.samples_per_burst = 1;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, TimeDelta::FromMilliseconds(50), &profiles);
+ InternalFrameSets frame_sets =
+ CaptureFrameSets(params, TimeDelta::FromMilliseconds(0));
- ASSERT_EQ(1u, profiles.size());
- EXPECT_EQ(1u, profiles[0].samples.size());
+ EXPECT_TRUE(frame_sets.empty());
}
-// Checks that tasks can be stopped before completion and incomplete call stack
-// profiles are captured.
+// Checks that tasks can be stopped before completion and incomplete internal
+// frames are captured.
PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
// Test delegate that counts samples.
class SampleRecordedEvent : public NativeStackSamplerTestDelegate {
@@ -953,7 +966,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
SamplingParams params;
params.sampling_interval = AVeryLongTimeDelta();
- params.samples_per_burst = 2;
+ params.samples_per_profile = 2;
SampleRecordedEvent samples_recorded;
TestProfilerInfo profiler_info(target_thread_id, params, &samples_recorded);
@@ -967,8 +980,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
profiler_info.profiler.Stop();
profiler_info.completed.Wait();
- ASSERT_EQ(1u, profiler_info.profiles.size());
- EXPECT_EQ(1u, profiler_info.profiles[0].samples.size());
+ EXPECT_EQ(1u, profiler_info.profile.frame_sets.size());
});
}
@@ -977,11 +989,15 @@ PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(10);
- CallStackProfiles profiles;
- WithTargetThread([&params, &profiles](PlatformThreadId target_thread_id) {
+ Profile profile;
+ WithTargetThread([&params, &profile](PlatformThreadId target_thread_id) {
std::unique_ptr<StackSamplingProfiler> profiler;
- profiler.reset(new StackSamplingProfiler(
- target_thread_id, params, Bind(&SaveProfiles, Unretained(&profiles))));
+ auto profile_builder = std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([&profile](Profile result_profile) {
+ profile = std::move(result_profile);
+ }));
+ profiler.reset(new StackSamplingProfiler(target_thread_id, params,
+ std::move(profile_builder)));
profiler->Start();
profiler.reset();
@@ -991,49 +1007,17 @@ PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
});
}
-// Checks that the same profiler may be run multiple times.
-PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleTimes) {
- WithTargetThread([](PlatformThreadId target_thread_id) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- CallStackProfiles profiles;
- WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
-
- // Just start and stop to execute code paths.
- profiler.Start();
- profiler.Stop();
- sampling_completed.Wait();
-
- // Ensure a second request will run and not block.
- sampling_completed.Reset();
- profiles.clear();
- profiler.Start();
- sampling_completed.Wait();
- profiler.Stop();
- ASSERT_EQ(1u, profiles.size());
- });
-}
-
// Checks that the different profilers may be run.
PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleProfilers) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
- profiles.clear();
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
}
// Checks that a sampler can be started while another is running.
@@ -1042,10 +1026,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
std::vector<SamplingParams> params(2);
params[0].initial_delay = AVeryLongTimeDelta();
- params[0].samples_per_burst = 1;
+ params[0].samples_per_profile = 1;
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 1;
+ params[1].samples_per_profile = 1;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1053,7 +1037,35 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
profiler_infos[0]->profiler.Start();
profiler_infos[1]->profiler.Start();
profiler_infos[1]->completed.Wait();
- EXPECT_EQ(1u, profiler_infos[1]->profiles.size());
+ EXPECT_EQ(1u, profiler_infos[1]->profile.frame_sets.size());
+ });
+}
+
+// Checks that the profile duration and the sampling interval are calculated
+// correctly. Also checks that RecordAnnotations() is invoked each time a sample
+// is recorded.
+PROFILER_TEST_F(StackSamplingProfilerTest, ProfileGeneralInfo) {
+ WithTargetThread([](PlatformThreadId target_thread_id) {
+ SamplingParams params;
+ params.sampling_interval = TimeDelta::FromMilliseconds(1);
+ params.samples_per_profile = 3;
+
+ TestProfilerInfo profiler_info(target_thread_id, params);
+
+ profiler_info.profiler.Start();
+ profiler_info.completed.Wait();
+ EXPECT_EQ(3u, profiler_info.profile.frame_sets.size());
+
+ // The profile duration should be greater than the total sampling intervals.
+ EXPECT_GT(profiler_info.profile.profile_duration,
+ profiler_info.profile.sampling_period * 3);
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ profiler_info.profile.sampling_period);
+
+ // The number of invocations of RecordAnnotations() should be equal to the
+ // number of samples recorded.
+ EXPECT_EQ(3, profiler_info.profile.annotation_count);
});
}
@@ -1061,11 +1073,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
PROFILER_TEST_F(StackSamplingProfilerTest, SamplerIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
// Capture thread should still be running at this point.
ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
@@ -1079,7 +1090,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, SamplerIdleShutdown) {
// happens asynchronously. Watch until the thread actually exits. This test
// will time-out in the case of failure.
while (StackSamplingProfiler::TestAPI::IsSamplingThreadRunning())
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
}
// Checks that additional requests will restart a stopped profiler.
@@ -1087,11 +1098,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
WillRestartSamplerAfterIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
// Capture thread should still be running at this point.
ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
@@ -1101,9 +1111,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(false);
// Ensure another capture will start the sampling thread and run.
- profiles.clear();
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
}
@@ -1114,7 +1123,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopAfterIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(1);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
TestProfilerInfo profiler_info(target_thread_id, params);
@@ -1142,11 +1151,11 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
params[0].initial_delay = AVeryLongTimeDelta();
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 1;
+ params[0].samples_per_profile = 1;
params[1].initial_delay = TimeDelta::FromMilliseconds(0);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 1;
+ params[1].samples_per_profile = 1;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1171,13 +1180,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, IdleShutdownAbort) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(1);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
TestProfilerInfo profiler_info(target_thread_id, params);
profiler_info.profiler.Start();
profiler_info.completed.Wait();
- EXPECT_EQ(1u, profiler_info.profiles.size());
+ EXPECT_EQ(1u, profiler_info.profile.frame_sets.size());
// Perform an idle shutdown but simulate that a new capture is started
// before it can actually run.
@@ -1188,14 +1197,14 @@ PROFILER_TEST_F(StackSamplingProfilerTest, IdleShutdownAbort) {
// except to wait a reasonable amount of time and then check. Since the
// thread was just running ("perform" blocked until it was), it should
// finish almost immediately and without any waiting for tasks or events.
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(200));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(200));
EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
// Ensure that it's still possible to run another sampler.
TestProfilerInfo another_info(target_thread_id, params);
another_info.profiler.Start();
another_info.completed.Wait();
- EXPECT_EQ(1u, another_info.profiles.size());
+ EXPECT_EQ(1u, another_info.profile.frame_sets.size());
});
}
@@ -1211,11 +1220,11 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_InSync) {
// will be 10ms (delay) + 10x1ms (sampling) + 1/2 timer minimum interval.
params[0].initial_delay = TimeDelta::FromMilliseconds(10);
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 9;
+ params[0].samples_per_profile = 9;
params[1].initial_delay = TimeDelta::FromMilliseconds(11);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 8;
+ params[1].samples_per_profile = 8;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1225,16 +1234,14 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_InSync) {
// Wait for one profiler to finish.
size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
- ASSERT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
size_t other_profiler = 1 - completed_profiler;
// Wait for the other profiler to finish.
profiler_infos[other_profiler]->completed.Wait();
- ASSERT_EQ(1u, profiler_infos[other_profiler]->profiles.size());
- // Ensure each got the correct number of samples.
- EXPECT_EQ(9u, profiler_infos[0]->profiles[0].samples.size());
- EXPECT_EQ(8u, profiler_infos[1]->profiles[0].samples.size());
+ // Ensure each got the correct number of frame sets.
+ EXPECT_EQ(9u, profiler_infos[0]->profile.frame_sets.size());
+ EXPECT_EQ(8u, profiler_infos[1]->profile.frame_sets.size());
});
}
@@ -1245,15 +1252,15 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
params[0].initial_delay = TimeDelta::FromMilliseconds(8);
params[0].sampling_interval = TimeDelta::FromMilliseconds(4);
- params[0].samples_per_burst = 10;
+ params[0].samples_per_profile = 10;
params[1].initial_delay = TimeDelta::FromMilliseconds(9);
params[1].sampling_interval = TimeDelta::FromMilliseconds(3);
- params[1].samples_per_burst = 10;
+ params[1].samples_per_profile = 10;
params[2].initial_delay = TimeDelta::FromMilliseconds(10);
params[2].sampling_interval = TimeDelta::FromMilliseconds(2);
- params[2].samples_per_burst = 10;
+ params[2].samples_per_profile = 10;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1263,7 +1270,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
// Wait for one profiler to finish.
size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
- EXPECT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
+ EXPECT_EQ(10u,
+ profiler_infos[completed_profiler]->profile.frame_sets.size());
// Stop and destroy all profilers, always in the same order. Don't crash.
for (size_t i = 0; i < profiler_infos.size(); ++i)
profiler_infos[i]->profiler.Stop();
@@ -1283,20 +1291,24 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
+ Profile profile;
{
ScopedNativeLibrary other_library(LoadOtherLibrary());
WithTargetThread(
- [&params, &profiles](PlatformThreadId target_thread_id) {
+ [&params, &profile](PlatformThreadId target_thread_id) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([&profile, &sampling_thread_completed](
+ Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })));
profiler.Start();
sampling_thread_completed.Wait();
},
@@ -1304,33 +1316,32 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
other_library.get()));
}
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.frames.end())
+ InternalFrames::const_iterator other_library_frame =
+ FindFirstFrameWithinFunction(frames,
+ &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// The stack should look like this, resulting in three frames between
// SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
@@ -1341,7 +1352,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
// InvokeCallbackFunction (in other library)
// TargetThread::CallThroughOtherLibrary
EXPECT_EQ(3, other_library_frame - end_frame)
- << "Stack:\n" << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << "Stack:\n"
+ << FormatSampleForDiagnosticOutput(frames);
}
// Checks that a stack that runs through a library that is unloading produces a
@@ -1389,38 +1401,42 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleSampledThreads) {
SamplingParams params1, params2;
params1.initial_delay = TimeDelta::FromMilliseconds(10);
params1.sampling_interval = TimeDelta::FromMilliseconds(1);
- params1.samples_per_burst = 9;
+ params1.samples_per_profile = 9;
params2.initial_delay = TimeDelta::FromMilliseconds(10);
params2.sampling_interval = TimeDelta::FromMilliseconds(1);
- params2.samples_per_burst = 8;
+ params2.samples_per_profile = 8;
- std::vector<CallStackProfile> profiles1, profiles2;
+ Profile profile1, profile2;
WaitableEvent sampling_thread_completed1(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback1 =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles1),
- Unretained(&sampling_thread_completed1));
- StackSamplingProfiler profiler1(target_thread1.id(), params1, callback1);
+ StackSamplingProfiler profiler1(
+ target_thread1.id(), params1,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile1, &sampling_thread_completed1](Profile result_profile) {
+ profile1 = std::move(result_profile);
+ sampling_thread_completed1.Signal();
+ })));
WaitableEvent sampling_thread_completed2(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback2 =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles2),
- Unretained(&sampling_thread_completed2));
- StackSamplingProfiler profiler2(target_thread2.id(), params2, callback2);
+ StackSamplingProfiler profiler2(
+ target_thread2.id(), params2,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile2, &sampling_thread_completed2](Profile result_profile) {
+ profile2 = std::move(result_profile);
+ sampling_thread_completed2.Signal();
+ })));
// Finally the real work.
profiler1.Start();
profiler2.Start();
sampling_thread_completed1.Wait();
sampling_thread_completed2.Wait();
- ASSERT_EQ(1u, profiles1.size());
- EXPECT_EQ(9u, profiles1[0].samples.size());
- ASSERT_EQ(1u, profiles2.size());
- EXPECT_EQ(8u, profiles2[0].samples.size());
+ EXPECT_EQ(9u, profile1.frame_sets.size());
+ EXPECT_EQ(8u, profile2.frame_sets.size());
target_thread1.SignalThreadToFinish();
target_thread2.SignalThreadToFinish();
@@ -1441,9 +1457,11 @@ class ProfilerThread : public SimpleThread {
WaitableEvent::InitialState::NOT_SIGNALED),
profiler_(thread_id,
params,
- Bind(&SaveProfilesAndSignalEvent,
- Unretained(&profiles_),
- Unretained(&completed_))) {}
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([this](Profile result_profile) {
+ profile_ = std::move(result_profile);
+ completed_.Signal();
+ }))) {}
void Run() override {
run_.Wait();
@@ -1454,12 +1472,12 @@ class ProfilerThread : public SimpleThread {
void Wait() { completed_.Wait(); }
- CallStackProfiles& profiles() { return profiles_; }
+ Profile& profile() { return profile_; }
private:
WaitableEvent run_;
- CallStackProfiles profiles_;
+ Profile profile_;
WaitableEvent completed_;
StackSamplingProfiler profiler_;
};
@@ -1474,17 +1492,17 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilerThreads) {
SamplingParams params1, params2;
params1.initial_delay = TimeDelta::FromMilliseconds(10);
params1.sampling_interval = TimeDelta::FromMilliseconds(1);
- params1.samples_per_burst = 9;
+ params1.samples_per_profile = 9;
params2.initial_delay = TimeDelta::FromMilliseconds(10);
params2.sampling_interval = TimeDelta::FromMilliseconds(1);
- params2.samples_per_burst = 8;
+ params2.samples_per_profile = 8;
// Start the profiler threads and give them a moment to get going.
ProfilerThread profiler_thread1("profiler1", target_thread_id, params1);
ProfilerThread profiler_thread2("profiler2", target_thread_id, params2);
profiler_thread1.Start();
profiler_thread2.Start();
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
// This will (approximately) synchronize the two threads.
profiler_thread1.Go();
@@ -1493,10 +1511,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilerThreads) {
// Wait for them both to finish and validate collection.
profiler_thread1.Wait();
profiler_thread2.Wait();
- ASSERT_EQ(1u, profiler_thread1.profiles().size());
- EXPECT_EQ(9u, profiler_thread1.profiles()[0].samples.size());
- ASSERT_EQ(1u, profiler_thread2.profiles().size());
- EXPECT_EQ(8u, profiler_thread2.profiles()[0].samples.size());
+ EXPECT_EQ(9u, profiler_thread1.profile().frame_sets.size());
+ EXPECT_EQ(8u, profiler_thread2.profile().frame_sets.size());
profiler_thread1.Join();
profiler_thread2.Join();
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder.cc b/chromium/base/profiler/win32_stack_frame_unwinder.cc
index 9e6ab392524..a3f5f74b853 100644
--- a/chromium/base/profiler/win32_stack_frame_unwinder.cc
+++ b/chromium/base/profiler/win32_stack_frame_unwinder.cc
@@ -67,7 +67,7 @@ PRUNTIME_FUNCTION Win32UnwindFunctions::LookupFunctionEntry(
DWORD64 program_counter,
PDWORD64 image_base) {
#ifdef _WIN64
- return RtlLookupFunctionEntry(program_counter, image_base, nullptr);
+ return ::RtlLookupFunctionEntry(program_counter, image_base, nullptr);
#else
NOTREACHED();
return nullptr;
@@ -82,9 +82,9 @@ void Win32UnwindFunctions::VirtualUnwind(DWORD64 image_base,
void* handler_data;
ULONG64 establisher_frame;
KNONVOLATILE_CONTEXT_POINTERS nvcontext = {};
- RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, program_counter,
- runtime_function, context, &handler_data,
- &establisher_frame, &nvcontext);
+ ::RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, program_counter,
+ runtime_function, context, &handler_data,
+ &establisher_frame, &nvcontext);
#else
NOTREACHED();
#endif
diff --git a/chromium/base/rand_util.h b/chromium/base/rand_util.h
index 03bf46f74c6..45e42832237 100644
--- a/chromium/base/rand_util.h
+++ b/chromium/base/rand_util.h
@@ -69,7 +69,7 @@ void RandomShuffle(Itr first, Itr last) {
std::shuffle(first, last, RandomBitGenerator());
}
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
BASE_EXPORT int GetUrandomFD();
#endif
diff --git a/chromium/base/rand_util_fuchsia.cc b/chromium/base/rand_util_fuchsia.cc
index 5f991d5cc05..743c709898a 100644
--- a/chromium/base/rand_util_fuchsia.cc
+++ b/chromium/base/rand_util_fuchsia.cc
@@ -6,28 +6,10 @@
#include <zircon/syscalls.h>
-#include <algorithm>
-
-#include "base/logging.h"
-
namespace base {
void RandBytes(void* output, size_t output_length) {
- size_t remaining = output_length;
- unsigned char* cur = reinterpret_cast<unsigned char*>(output);
- while (remaining > 0) {
- // The syscall has a maximum number of bytes that can be read at once.
- size_t read_len =
- std::min(remaining, static_cast<size_t>(ZX_CPRNG_DRAW_MAX_LEN));
-
- size_t actual;
- zx_status_t status = zx_cprng_draw(cur, read_len, &actual);
- CHECK(status == ZX_OK && read_len == actual);
-
- CHECK(remaining >= actual);
- remaining -= actual;
- cur += actual;
- }
+ zx_cprng_draw(output, output_length);
}
} // namespace base
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index 3882f642a6a..d4d87d72a0a 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -152,6 +152,7 @@ void RunLoop::QuitWhenIdle() {
base::Closure RunLoop::QuitClosure() {
// TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
// DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ allow_quit_current_deprecated_ = false;
// Need to use ProxyToTaskRunner() as WeakPtrs vended from
// |weak_factory_| may only be accessed on |origin_task_runner_|.
@@ -163,6 +164,7 @@ base::Closure RunLoop::QuitClosure() {
base::Closure RunLoop::QuitWhenIdleClosure() {
// TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
// DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ allow_quit_current_deprecated_ = false;
// Need to use ProxyToTaskRunner() as WeakPtrs vended from
// |weak_factory_| may only be accessed on |origin_task_runner_|.
@@ -201,17 +203,29 @@ void RunLoop::RemoveNestingObserverOnCurrentThread(NestingObserver* observer) {
// static
void RunLoop::QuitCurrentDeprecated() {
DCHECK(IsRunningOnCurrentThread());
- tls_delegate.Get().Get()->active_run_loops_.top()->Quit();
+ Delegate* delegate = tls_delegate.Get().Get();
+ DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
+ << "Please migrate off QuitCurrentDeprecated(), e.g. to QuitClosure().";
+ delegate->active_run_loops_.top()->Quit();
}
// static
void RunLoop::QuitCurrentWhenIdleDeprecated() {
DCHECK(IsRunningOnCurrentThread());
- tls_delegate.Get().Get()->active_run_loops_.top()->QuitWhenIdle();
+ Delegate* delegate = tls_delegate.Get().Get();
+ DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
+ << "Please migrate off QuitCurrentWhenIdleDeprecated(), e.g. to "
+ "QuitWhenIdleClosure().";
+ delegate->active_run_loops_.top()->QuitWhenIdle();
}
// static
Closure RunLoop::QuitCurrentWhenIdleClosureDeprecated() {
+ // TODO(844016): Fix callsites and enable this check, or remove the API.
+ // Delegate* delegate = tls_delegate.Get().Get();
+ // DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
+ // << "Please migrate off QuitCurrentWhenIdleClosureDeprecated(), e.g to "
+ // "QuitWhenIdleClosure().";
return Bind(&RunLoop::QuitCurrentWhenIdleDeprecated);
}
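Editorial note: a minimal sketch (not part of this patch) of the behavior change above. Once a Quit*Closure() is taken from a RunLoop, the new allow_quit_current_deprecated_ flag makes QuitCurrent*Deprecated() DCHECK for that loop; |task_runner| is an assumed SingleThreadTaskRunner for the thread running the loop.
  base::RunLoop run_loop;
  // Taking the closure flips allow_quit_current_deprecated_ to false.
  base::Closure quit = run_loop.QuitClosure();
  task_runner->PostTask(FROM_HERE, quit);  // Quitting via the closure still works.
  // Calling RunLoop::QuitCurrentDeprecated() while this loop runs would now
  // hit the DCHECK added above and ask callers to migrate to QuitClosure().
  run_loop.Run();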
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index 719f928a972..2582a69f1dd 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -248,6 +248,8 @@ class BASE_EXPORT RunLoop {
};
private:
+ FRIEND_TEST_ALL_PREFIXES(MessageLoopTypedTest, RunLoopQuitOrderAfter);
+
#if defined(OS_ANDROID)
// Android doesn't support the blocking RunLoop::Run, so it calls
// BeforeRun and AfterRun directly.
@@ -283,6 +285,11 @@ class BASE_EXPORT RunLoop {
// rather than pushed to Delegate to support nested RunLoops.
bool quit_when_idle_received_ = false;
+ // True if use of QuitCurrent*Deprecated() is allowed. Taking a Quit*Closure()
+ // from a RunLoop implicitly sets this to false, so QuitCurrent*Deprecated()
+ // cannot be used while that RunLoop is being Run().
+ bool allow_quit_current_deprecated_ = true;
+
// RunLoop is not thread-safe. Its state/methods, unless marked as such, may
// not be accessed from any other sequence than the thread it was constructed
// on. Exception: RunLoop can be safely accessed from one other sequence (or
diff --git a/chromium/base/sampling_heap_profiler/benchmark-octane.js b/chromium/base/sampling_heap_profiler/benchmark-octane.js
index 1e59af52292..602e152756e 100644
--- a/chromium/base/sampling_heap_profiler/benchmark-octane.js
+++ b/chromium/base/sampling_heap_profiler/benchmark-octane.js
@@ -4,63 +4,55 @@
// To benchmark a specific version of Chrome set the CHROME_PATH environment
// variable, e.g.:
-// $ CHROME_PATH=~/chromium/out/Release/chrome node benchmark-octane.js
+// $ CHROME_PATH=~/chromium/src/out/Release/chrome node benchmark-octane.js
const puppeteer = require('puppeteer');
-let base_score;
-
async function runOctane(samplingRate) {
const args = ['--enable-devtools-experiments'];
if (samplingRate)
args.push(`--sampling-heap-profiler=${samplingRate}`);
- const browser = await puppeteer.launch({
- executablePath: process.env.CHROME_PATH, args, headless: true});
- try {
- const page = await browser.newPage();
- await page.goto('https://chromium.github.io/octane/');
- await page.waitForSelector('#run-octane'); // Just in case.
- await page.click('#run-octane');
-
- const scoreDiv = await page.waitForSelector('#main-banner:only-child',
- {timeout: 120000});
- const scoreText = await page.evaluate(e => e.innerText, scoreDiv);
- const match = /Score:\s*(\d+)/.exec(scoreText);
- if (match.length < 2) {
- console.log(`Error: cannot parse score from '${scoreText}'`);
- return 0;
+ while (true) {
+ let browser;
+ try {
+ browser = await puppeteer.launch({
+ executablePath: process.env.CHROME_PATH, args, headless: true});
+ const page = await browser.newPage();
+ await page.goto('https://chromium.github.io/octane/');
+ await page.waitForSelector('#run-octane'); // Just in case.
+ await page.click('#run-octane');
+
+ const scoreDiv = await page.waitForSelector('#main-banner:only-child',
+ {timeout: 120000});
+ const scoreText = await page.evaluate(e => e.innerText, scoreDiv);
+ const match = /Score:\s*(\d+)/.exec(scoreText);
+ if (!match || match.length < 2)
+ continue;
+ return parseInt(match[1]);
+ } finally {
+ if (browser)
+ await browser.close();
}
- return parseInt(match[1]);
- } finally {
- await browser.close();
}
}
-async function makeRuns(rate) {
- console.log(`tesing rate: ${rate}`);
- let sum = 0;
- let sum2 = 0;
- const n = 10;
- for (let i = 0; i < n; ++i) {
- const score = await runOctane(rate);
- console.log(score);
- sum += score;
- sum2 += score * score;
- }
- const mean = sum / n;
- const stdev = Math.sqrt(sum2 / n - mean * mean);
- console.log(`rate: ${rate} mean: ${mean} stdev: ${stdev}`);
- return mean;
+async function makeRuns(rates) {
+ const scores = [];
+ for (const rate of rates)
+ scores.push(await runOctane(rate));
+ console.log(scores.join('\t'));
}
async function main() {
console.log(`Using ${process.env.CHROME_PATH || puppeteer.executablePath()}`);
- const base_score = await makeRuns(0);
- for (let rate = 32; rate <= 2048; rate *= 2) {
- const score = await makeRuns(rate);
- console.log(`slowdown: ${(100 - score / base_score * 100).toFixed(2)}%\n`);
- }
+ const rates = [0];
+ for (let rate = 8; rate <= 2048; rate *= 2)
+ rates.push(rate);
+ console.log('Rates [KB]:');
+ console.log(rates.join('\t'));
+ console.log('='.repeat(rates.length * 8));
+ for (let i = 0; i < 100; ++i)
+ await makeRuns(rates);
}
main();
-
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc
new file mode 100644
index 00000000000..d06dc5c79e0
--- /dev/null
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc
@@ -0,0 +1,72 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
+
+#include <limits>
+
+#include "base/bits.h"
+
+namespace base {
+
+LockFreeAddressHashSet::LockFreeAddressHashSet(size_t buckets_count)
+ : buckets_(buckets_count), bucket_mask_(buckets_count - 1) {
+ DCHECK(bits::IsPowerOfTwo(buckets_count));
+ DCHECK(bucket_mask_ <= std::numeric_limits<uint32_t>::max());
+}
+
+LockFreeAddressHashSet::~LockFreeAddressHashSet() {
+ for (subtle::AtomicWord bucket : buckets_) {
+ Node* node = reinterpret_cast<Node*>(bucket);
+ while (node) {
+ Node* next = reinterpret_cast<Node*>(node->next);
+ delete node;
+ node = next;
+ }
+ }
+}
+
+void LockFreeAddressHashSet::Insert(void* key) {
+ // TODO(alph): Replace with DCHECK.
+ CHECK(key != nullptr);
+ CHECK(!Contains(key));
+ subtle::NoBarrier_AtomicIncrement(&size_, 1);
+ uint32_t h = Hash(key);
+ subtle::AtomicWord* bucket_ptr = &buckets_[h & bucket_mask_];
+ Node* node = reinterpret_cast<Node*>(subtle::NoBarrier_Load(bucket_ptr));
+ // First iterate over the bucket nodes and try to reuse an empty one if found.
+ for (; node != nullptr; node = next_node(node)) {
+ if (subtle::NoBarrier_CompareAndSwap(
+ &node->key, 0, reinterpret_cast<subtle::AtomicWord>(key)) == 0) {
+ return;
+ }
+ }
+ DCHECK(node == nullptr);
+ // There are no empty nodes to reuse in the bucket.
+ // Create a new node and prepend it to the list.
+ Node* new_node = new Node(key);
+ subtle::AtomicWord current_head = subtle::NoBarrier_Load(bucket_ptr);
+ subtle::AtomicWord expected_head;
+ do {
+ subtle::NoBarrier_Store(&new_node->next, current_head);
+ expected_head = current_head;
+ current_head = subtle::Release_CompareAndSwap(
+ bucket_ptr, current_head,
+ reinterpret_cast<subtle::AtomicWord>(new_node));
+ } while (current_head != expected_head);
+}
+
+void LockFreeAddressHashSet::Copy(const LockFreeAddressHashSet& other) {
+ DCHECK_EQ(0u, size());
+ for (subtle::AtomicWord bucket : other.buckets_) {
+ for (Node* node = reinterpret_cast<Node*>(bucket); node;
+ node = next_node(node)) {
+ subtle::AtomicWord k = subtle::NoBarrier_Load(&node->key);
+ if (k)
+ Insert(reinterpret_cast<void*>(k));
+ }
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
new file mode 100644
index 00000000000..7e96b89adc8
--- /dev/null
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
@@ -0,0 +1,152 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SAMPLING_HEAP_PROFILER_LOCK_FREE_ADDRESS_HASH_SET_H_
+#define BASE_SAMPLING_HEAP_PROFILER_LOCK_FREE_ADDRESS_HASH_SET_H_
+
+#include <cstdint>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+
+namespace base {
+
+// A hash set container that provides lock-free versions of
+// |Insert|, |Remove|, and |Contains| operations.
+// It does not support concurrent write operations |Insert| and |Remove|
+// over the same key. Concurrent writes of distinct keys are ok.
+// The |Contains| method can be executed concurrently with other |Insert|,
+// |Remove|, or |Contains| operations, even over the same key.
+// Note, however, that the result of running |Contains| concurrently with
+// |Insert| or |Remove| over the same key is racy.
+//
+// The hash set never rehashes, so the number of buckets stays the same
+// for the lifetime of the set.
+//
+// Internally the hashset is implemented as a vector of N buckets
+// (N has to be a power of 2). Each bucket holds a single-linked list of
+// nodes each corresponding to a key.
+// It is not possible to really delete nodes from the list as there might
+// be concurrent reads being executed over the node. The |Remove| operation
+// just marks the node as empty by placing nullptr into its key field.
+// Subsequent |Insert| operations may reuse empty nodes when possible.
+//
+// The structure of the hashset for N buckets is the following:
+// 0: {*}--> {key1,*}--> {key2,*}--> NULL
+// 1: {*}--> NULL
+// 2: {*}--> {NULL,*}--> {key3,*}--> {key4,*}--> NULL
+// ...
+// N-1: {*}--> {keyM,*}--> NULL
+class BASE_EXPORT LockFreeAddressHashSet {
+ public:
+ explicit LockFreeAddressHashSet(size_t buckets_count);
+ ~LockFreeAddressHashSet();
+
+ // Checks if the |key| is in the set. Can be executed concurrently with
+ // |Insert|, |Remove|, and |Contains| operations.
+ bool Contains(void* key) const;
+
+ // Removes the |key| from the set. The key must be present in the set before
+ // the invocation.
+ // Can be concurrent with other |Insert| and |Remove| executions, provided
+ // they operate over distinct keys.
+ // Concurrent |Insert| or |Remove| executions over the same key are not
+ // supported.
+ void Remove(void* key);
+
+ // Inserts the |key| into the set. The key must not be present in the set
+ // before the invocation.
+ // Can be concurrent with other |Insert| and |Remove| executions, provided
+ // they operate over distinct keys.
+ // Concurrent |Insert| or |Remove| executions over the same key are not
+ // supported.
+ void Insert(void* key);
+
+ // Copies contents of |other| set into the current set. The current set
+ // must be empty before the call.
+ // The operation cannot be executed concurrently with any other methods.
+ void Copy(const LockFreeAddressHashSet& other);
+
+ size_t buckets_count() const { return buckets_.size(); }
+ size_t size() const {
+ return static_cast<size_t>(subtle::NoBarrier_Load(&size_));
+ }
+
+ // Returns the average bucket utilization.
+ float load_factor() const { return 1.f * size() / buckets_.size(); }
+
+ private:
+ friend class LockFreeAddressHashSetTest;
+
+ struct Node {
+ Node() : key(0), next(0) {}
+ explicit Node(void* key);
+
+ subtle::AtomicWord key;
+ subtle::AtomicWord next;
+ };
+
+ static uint32_t Hash(void* key);
+ Node* FindNode(void* key) const;
+ Node* Bucket(void* key) const;
+ static Node* next_node(Node* node) {
+ return reinterpret_cast<Node*>(subtle::NoBarrier_Load(&node->next));
+ }
+
+ std::vector<subtle::AtomicWord> buckets_;
+ size_t bucket_mask_;
+ subtle::AtomicWord size_ = 0;
+};
+
+inline LockFreeAddressHashSet::Node::Node(void* a_key) {
+ subtle::NoBarrier_Store(&key, reinterpret_cast<subtle::AtomicWord>(a_key));
+ subtle::NoBarrier_Store(&next, 0);
+}
+
+inline bool LockFreeAddressHashSet::Contains(void* key) const {
+ return FindNode(key) != nullptr;
+}
+
+inline void LockFreeAddressHashSet::Remove(void* key) {
+ Node* node = FindNode(key);
+ // TODO(alph): Replace with DCHECK.
+ CHECK(node != nullptr);
+ // We can never delete the node, nor detach it from the current bucket
+ // as there may always be another thread currently iterating over it.
+ // Instead we just mark it as empty, so |Insert| can reuse it later.
+ subtle::NoBarrier_Store(&node->key, 0);
+ subtle::NoBarrier_AtomicIncrement(&size_, -1);
+}
+
+inline LockFreeAddressHashSet::Node* LockFreeAddressHashSet::FindNode(
+ void* key) const {
+ for (Node* node = Bucket(key); node != nullptr; node = next_node(node)) {
+ void* k = reinterpret_cast<void*>(subtle::NoBarrier_Load(&node->key));
+ if (k == key)
+ return node;
+ }
+ return nullptr;
+}
+
+inline LockFreeAddressHashSet::Node* LockFreeAddressHashSet::Bucket(
+ void* key) const {
+ // TODO(alph): Replace with DCHECK.
+ CHECK(key != nullptr);
+ uint32_t h = Hash(key);
+ return reinterpret_cast<Node*>(
+ subtle::NoBarrier_Load(&buckets_[h & bucket_mask_]));
+}
+
+// static
+inline uint32_t LockFreeAddressHashSet::Hash(void* key) {
+ // A simple fast hash function for addresses.
+ uint64_t k = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(key));
+ uint64_t random_bits = 0x4bfdb9df5a6f243bull;
+ return static_cast<uint32_t>((k * random_bits) >> 32);
+}
+
+} // namespace base
+
+#endif // BASE_SAMPLING_HEAP_PROFILER_LOCK_FREE_ADDRESS_HASH_SET_H_
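Editorial note: a minimal single-threaded usage sketch of the LockFreeAddressHashSet interface declared above (bucket count and key are arbitrary; concurrent |Insert|/|Remove| of the same key is not allowed, per the class comment).
  base::LockFreeAddressHashSet set(64);  // Bucket count must be a power of two.
  int sampled_object = 0;
  set.Insert(&sampled_object);           // Key must not already be in the set.
  CHECK(set.Contains(&sampled_object));  // Readers may run concurrently.
  set.Remove(&sampled_object);           // Marks the node empty for later reuse.
  CHECK(!set.Contains(&sampled_object));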
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc
new file mode 100644
index 00000000000..dac0066982a
--- /dev/null
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
+
+#include <stdlib.h>
+#include <cinttypes>
+#include <memory>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/debug/alias.h"
+#include "base/threading/simple_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class LockFreeAddressHashSetTest : public ::testing::Test {
+ public:
+ static bool Subset(const LockFreeAddressHashSet& superset,
+ const LockFreeAddressHashSet& subset) {
+ for (subtle::AtomicWord bucket : subset.buckets_) {
+ for (LockFreeAddressHashSet::Node* node =
+ reinterpret_cast<LockFreeAddressHashSet::Node*>(bucket);
+ node; node = LockFreeAddressHashSet::next_node(node)) {
+ void* key = reinterpret_cast<void*>(node->key);
+ if (key && !superset.Contains(key))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static bool Equals(const LockFreeAddressHashSet& set1,
+ const LockFreeAddressHashSet& set2) {
+ return Subset(set1, set2) && Subset(set2, set1);
+ }
+
+ static size_t BucketSize(const LockFreeAddressHashSet& set, size_t bucket) {
+ size_t count = 0;
+ LockFreeAddressHashSet::Node* node =
+ reinterpret_cast<LockFreeAddressHashSet::Node*>(set.buckets_[bucket]);
+ for (; node; node = set.next_node(node))
+ ++count;
+ return count;
+ }
+};
+
+namespace {
+
+TEST_F(LockFreeAddressHashSetTest, EmptySet) {
+ LockFreeAddressHashSet set(8);
+ EXPECT_EQ(size_t(0), set.size());
+ EXPECT_EQ(size_t(8), set.buckets_count());
+ EXPECT_EQ(0., set.load_factor());
+ EXPECT_FALSE(set.Contains(&set));
+}
+
+TEST_F(LockFreeAddressHashSetTest, BasicOperations) {
+ LockFreeAddressHashSet set(8);
+
+ for (size_t i = 1; i <= 100; ++i) {
+ void* ptr = reinterpret_cast<void*>(i);
+ set.Insert(ptr);
+ EXPECT_EQ(i, set.size());
+ EXPECT_TRUE(set.Contains(ptr));
+ }
+
+ size_t size = 100;
+ EXPECT_EQ(size, set.size());
+ EXPECT_EQ(size_t(8), set.buckets_count());
+ EXPECT_EQ(size / 8., set.load_factor());
+
+ for (size_t i = 99; i >= 3; i -= 3) {
+ void* ptr = reinterpret_cast<void*>(i);
+ set.Remove(ptr);
+ EXPECT_EQ(--size, set.size());
+ EXPECT_FALSE(set.Contains(ptr));
+ }
+ // Removed every 3rd value (33 total) from the set; 67 remain.
+ EXPECT_EQ(size_t(67), set.size());
+
+ for (size_t i = 1; i <= 100; ++i) {
+ void* ptr = reinterpret_cast<void*>(i);
+ EXPECT_EQ(i % 3 != 0, set.Contains(ptr));
+ }
+}
+
+TEST_F(LockFreeAddressHashSetTest, Copy) {
+ LockFreeAddressHashSet set(16);
+
+ for (size_t i = 1000; i <= 16000; i += 1000) {
+ void* ptr = reinterpret_cast<void*>(i);
+ set.Insert(ptr);
+ }
+
+ LockFreeAddressHashSet set2(4);
+ LockFreeAddressHashSet set3(64);
+ set2.Copy(set);
+ set3.Copy(set);
+
+ EXPECT_TRUE(Equals(set, set2));
+ EXPECT_TRUE(Equals(set, set3));
+ EXPECT_TRUE(Equals(set2, set3));
+
+ set.Insert(reinterpret_cast<void*>(42));
+
+ EXPECT_FALSE(Equals(set, set2));
+ EXPECT_FALSE(Equals(set, set3));
+ EXPECT_TRUE(Equals(set2, set3));
+
+ EXPECT_TRUE(Subset(set, set2));
+ EXPECT_FALSE(Subset(set2, set));
+}
+
+class WriterThread : public SimpleThread {
+ public:
+ WriterThread(LockFreeAddressHashSet* set, subtle::Atomic32* cancel)
+ : SimpleThread("WriterThread"), set_(set), cancel_(cancel) {}
+
+ void Run() override {
+ for (size_t value = 42; !subtle::Acquire_Load(cancel_); ++value) {
+ void* ptr = reinterpret_cast<void*>(value);
+ set_->Insert(ptr);
+ EXPECT_TRUE(set_->Contains(ptr));
+ set_->Remove(ptr);
+ EXPECT_FALSE(set_->Contains(ptr));
+ }
+ // Leave a key for reader to test.
+ set_->Insert(reinterpret_cast<void*>(0x1337));
+ }
+
+ private:
+ LockFreeAddressHashSet* set_;
+ subtle::Atomic32* cancel_;
+};
+
+#if defined(THREAD_SANITIZER)
+#define DISABLE_ON_TSAN(test_name) DISABLED_##test_name
+#else
+#define DISABLE_ON_TSAN(test_name) test_name
+#endif // defined(THREAD_SANITIZER)
+TEST_F(LockFreeAddressHashSetTest, DISABLE_ON_TSAN(ConcurrentAccess)) {
+ // The purpose of this test is to make sure adding/removing keys concurrently
+ // does not disrupt the state of other keys.
+ LockFreeAddressHashSet set(16);
+ subtle::Atomic32 cancel = 0;
+ auto thread = std::make_unique<WriterThread>(&set, &cancel);
+ thread->Start();
+ for (size_t i = 1; i <= 20; ++i)
+ set.Insert(reinterpret_cast<void*>(i));
+ // Remove some items to test empty nodes.
+ for (size_t i = 16; i <= 20; ++i)
+ set.Remove(reinterpret_cast<void*>(i));
+ for (size_t k = 0; k < 100000; ++k) {
+ for (size_t i = 1; i <= 30; ++i) {
+ EXPECT_EQ(i < 16, set.Contains(reinterpret_cast<void*>(i)));
+ }
+ }
+ subtle::Release_Store(&cancel, 1);
+ thread->Join();
+
+ EXPECT_TRUE(set.Contains(reinterpret_cast<void*>(0x1337)));
+ EXPECT_FALSE(set.Contains(reinterpret_cast<void*>(0xbadf00d)));
+}
+
+TEST_F(LockFreeAddressHashSetTest, BucketsUsage) {
+ // Test the uniformity of buckets usage.
+ size_t count = 10000;
+ LockFreeAddressHashSet set(16);
+ for (size_t i = 0; i < count; ++i)
+ set.Insert(reinterpret_cast<void*>(0x10000 + 0x10 * i));
+ size_t average_per_bucket = count / set.buckets_count();
+ for (size_t i = 0; i < set.buckets_count(); ++i) {
+ size_t usage = BucketSize(set, i);
+ EXPECT_LT(average_per_bucket * 95 / 100, usage);
+ EXPECT_GT(average_per_bucket * 105 / 100, usage);
+ }
+}
+
+} // namespace
+} // namespace base
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
index 7f0e7e4d7cc..04c173725e4 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -17,9 +17,15 @@
#include "base/no_destructor.h"
#include "base/partition_alloc_buildflags.h"
#include "base/rand_util.h"
+#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE) && \
+ defined(OFFICIAL_BUILD)
+#include "base/trace_event/cfi_backtrace_android.h"
+#endif
+
namespace base {
using base::allocator::AllocatorDispatch;
@@ -40,8 +46,8 @@ bool g_deterministic;
// A positive value if profiling is running, otherwise it's zero.
Atomic32 g_running;
-// Pointer to the current |SamplingHeapProfiler::SamplesMap|.
-AtomicWord g_current_samples_map;
+// Pointer to the current |LockFreeAddressHashSet|.
+AtomicWord g_sampled_addresses_set;
// Sampling interval parameter, the mean value for intervals between samples.
AtomicWord g_sampling_interval = kDefaultSamplingIntervalBytes;
@@ -174,10 +180,11 @@ SamplingHeapProfiler* SamplingHeapProfiler::instance_;
SamplingHeapProfiler::SamplingHeapProfiler() {
instance_ = this;
- auto samples_map = std::make_unique<SamplesMap>(64);
+ auto sampled_addresses = std::make_unique<LockFreeAddressHashSet>(64);
base::subtle::NoBarrier_Store(
- &g_current_samples_map, reinterpret_cast<AtomicWord>(samples_map.get()));
- sample_maps_.push(std::move(samples_map));
+ &g_sampled_addresses_set,
+ reinterpret_cast<AtomicWord>(sampled_addresses.get()));
+ sampled_addresses_stack_.push(std::move(sampled_addresses));
}
// static
@@ -229,6 +236,14 @@ void SamplingHeapProfiler::SetHooksInstallCallback(
}
uint32_t SamplingHeapProfiler::Start() {
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE) && \
+ defined(OFFICIAL_BUILD)
+ if (!base::trace_event::CFIBacktraceAndroid::GetInitializedInstance()
+ ->can_unwind_stack_frames()) {
+ LOG(WARNING) << "Sampling heap profiler: Stack unwinding is not available.";
+ return 0;
+ }
+#endif
InstallAllocatorHooksOnce();
base::subtle::Barrier_AtomicIncrement(&g_running, 1);
return last_sample_ordinal_;
@@ -307,16 +322,31 @@ void SamplingHeapProfiler::RecordAlloc(void* address,
void SamplingHeapProfiler::RecordStackTrace(Sample* sample,
uint32_t skip_frames) {
#if !defined(OS_NACL)
- // TODO(alph): Consider using debug::TraceStackFramePointers. It should be
- // somewhat faster than base::debug::StackTrace.
- base::debug::StackTrace trace;
- size_t count;
- void* const* addresses = const_cast<void* const*>(trace.Addresses(&count));
- const uint32_t kSkipProfilerOwnFrames = 2;
+ constexpr uint32_t kMaxStackEntries = 256;
+ constexpr uint32_t kSkipProfilerOwnFrames = 2;
skip_frames += kSkipProfilerOwnFrames;
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE) && \
+ defined(OFFICIAL_BUILD)
+ const void* frames[kMaxStackEntries];
+ size_t frame_count =
+ base::trace_event::CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
+ frames, kMaxStackEntries);
+#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+ const void* frames[kMaxStackEntries];
+ size_t frame_count = base::debug::TraceStackFramePointers(
+ frames, kMaxStackEntries, skip_frames);
+ skip_frames = 0;
+#else
+ // Fall-back to capturing the stack with base::debug::StackTrace,
+ // which is likely slower, but more reliable.
+ base::debug::StackTrace stack_trace(kMaxStackEntries);
+ size_t frame_count = 0;
+ const void* const* frames = stack_trace.Addresses(&frame_count);
+#endif
+
sample->stack.insert(
- sample->stack.end(), &addresses[skip_frames],
- &addresses[std::max(count, static_cast<size_t>(skip_frames))]);
+ sample->stack.end(), const_cast<void**>(&frames[skip_frames]),
+ const_cast<void**>(&frames[std::max<size_t>(frame_count, skip_frames)]));
#endif
}
@@ -333,15 +363,21 @@ void SamplingHeapProfiler::DoRecordAlloc(size_t total_allocated,
RecordStackTrace(&sample, skip_frames);
for (auto* observer : observers_)
observer->SampleAdded(sample.ordinal, size, total_allocated);
- EnsureNoRehashingMap().emplace(address, std::move(sample));
+ samples_.emplace(address, std::move(sample));
+ // TODO(alph): Sometimes RecordAlloc is called twice in a row without
+ // a RecordFree in between. Investigate it.
+ if (!sampled_addresses_set().Contains(address))
+ sampled_addresses_set().Insert(address);
+ BalanceAddressesHashSet();
}
entered_.Set(false);
}
// static
void SamplingHeapProfiler::RecordFree(void* address) {
- const SamplesMap& samples = SamplingHeapProfiler::samples();
- if (UNLIKELY(samples.find(address) != samples.end()))
+ if (UNLIKELY(address == nullptr))
+ return;
+ if (UNLIKELY(sampled_addresses_set().Contains(address)))
instance_->DoRecordFree(address);
}
@@ -353,43 +389,43 @@ void SamplingHeapProfiler::DoRecordFree(void* address) {
entered_.Set(true);
{
base::AutoLock lock(mutex_);
- SamplesMap& samples = this->samples();
- auto it = samples.find(address);
- CHECK(it != samples.end());
+ auto it = samples_.find(address);
+ CHECK(it != samples_.end());
for (auto* observer : observers_)
observer->SampleRemoved(it->second.ordinal);
- samples.erase(it);
+ samples_.erase(it);
+ sampled_addresses_set().Remove(address);
}
entered_.Set(false);
}
-SamplingHeapProfiler::SamplesMap& SamplingHeapProfiler::EnsureNoRehashingMap() {
- // The function makes sure we never rehash the current map in place.
- // Instead if it comes close to the rehashing boundary, we allocate a twice
- // larger map, copy the samples into it, and atomically switch new readers
- // to use the new map.
+void SamplingHeapProfiler::BalanceAddressesHashSet() {
+ // If the load factor of the current addresses hash set exceeds 1, allocate
+ // a new set with twice as many buckets, copy all the data into it, and
+ // switch to using it.
+ // No other writes happen to either set during the copy, since it runs
+ // under the lock.
+ // All readers keep using the old set until the atomic switch takes place.
+ LockFreeAddressHashSet& current_set = sampled_addresses_set();
+ if (current_set.load_factor() < 1)
+ return;
+ auto new_set =
+ std::make_unique<LockFreeAddressHashSet>(current_set.buckets_count() * 2);
+ new_set->Copy(current_set);
+ // Atomically switch all the new readers to the new set.
+ base::subtle::Release_Store(&g_sampled_addresses_set,
+ reinterpret_cast<AtomicWord>(new_set.get()));
// We still have to keep all the old maps alive to resolve the theoretical
// race with readers in |RecordFree| that have already obtained the map,
// but haven't yet managed to access it.
- SamplesMap& samples = this->samples();
- size_t max_items_before_rehash =
- static_cast<size_t>(samples.bucket_count() * samples.max_load_factor());
- // Conservatively use 2 instead of 1 to workaround potential rounding errors.
- bool may_rehash_on_insert = samples.size() + 2 >= max_items_before_rehash;
- if (!may_rehash_on_insert)
- return samples;
- auto new_map = std::make_unique<SamplesMap>(samples.begin(), samples.end(),
- samples.bucket_count() * 2);
- base::subtle::Release_Store(&g_current_samples_map,
- reinterpret_cast<AtomicWord>(new_map.get()));
- sample_maps_.push(std::move(new_map));
- return this->samples();
+ sampled_addresses_stack_.push(std::move(new_set));
}
// static
-SamplingHeapProfiler::SamplesMap& SamplingHeapProfiler::samples() {
- return *reinterpret_cast<SamplesMap*>(
- base::subtle::NoBarrier_Load(&g_current_samples_map));
+LockFreeAddressHashSet& SamplingHeapProfiler::sampled_addresses_set() {
+ return *reinterpret_cast<LockFreeAddressHashSet*>(
+ base::subtle::NoBarrier_Load(&g_sampled_addresses_set));
}
// static
@@ -432,7 +468,7 @@ std::vector<SamplingHeapProfiler::Sample> SamplingHeapProfiler::GetSamples(
std::vector<Sample> samples;
{
base::AutoLock lock(mutex_);
- for (auto& it : this->samples()) {
+ for (auto& it : samples_) {
Sample& sample = it.second;
if (sample.ordinal > profile_id)
samples.push_back(sample);
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
index c5792208e1e..ea50e67a2da 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
@@ -20,6 +20,8 @@ namespace base {
template <typename T>
class NoDestructor;
+class LockFreeAddressHashSet;
+
// The class implements sampling profiling of native memory heap.
// It hooks on base::allocator and base::PartitionAlloc.
// When started it selects and records allocation samples based on
@@ -81,8 +83,6 @@ class BASE_EXPORT SamplingHeapProfiler {
static SamplingHeapProfiler* GetInstance();
private:
- using SamplesMap = std::unordered_map<void*, Sample>;
-
SamplingHeapProfiler();
~SamplingHeapProfiler() = delete;
@@ -96,14 +96,16 @@ class BASE_EXPORT SamplingHeapProfiler {
uint32_t skip_frames);
void DoRecordFree(void* address);
void RecordStackTrace(Sample*, uint32_t skip_frames);
- SamplesMap& EnsureNoRehashingMap();
- static SamplesMap& samples();
+ static LockFreeAddressHashSet& sampled_addresses_set();
+
+ void BalanceAddressesHashSet();
base::ThreadLocalBoolean entered_;
base::Lock mutex_;
- std::stack<std::unique_ptr<SamplesMap>> sample_maps_;
+ std::stack<std::unique_ptr<LockFreeAddressHashSet>> sampled_addresses_stack_;
+ std::unordered_map<void*, Sample> samples_;
std::vector<SamplesObserver*> observers_;
- uint32_t last_sample_ordinal_ = 0;
+ uint32_t last_sample_ordinal_ = 1;
static SamplingHeapProfiler* instance_;
diff --git a/chromium/base/synchronization/atomic_flag.cc b/chromium/base/synchronization/atomic_flag.cc
index 8c2018d3693..5aed67f88cf 100644
--- a/chromium/base/synchronization/atomic_flag.cc
+++ b/chromium/base/synchronization/atomic_flag.cc
@@ -13,11 +13,11 @@ AtomicFlag::AtomicFlag() {
// Set() from the same sequence after. Note: the sequencing requirements are
// necessary for IsSet()'s callers to know which sequence's memory operations
// they are synchronized with.
- set_sequence_checker_.DetachFromSequence();
+ DETACH_FROM_SEQUENCE(set_sequence_checker_);
}
void AtomicFlag::Set() {
- DCHECK(set_sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(set_sequence_checker_);
base::subtle::Release_Store(&flag_, 1);
}
diff --git a/chromium/base/synchronization/atomic_flag.h b/chromium/base/synchronization/atomic_flag.h
index ff175e190c5..a98a35c250e 100644
--- a/chromium/base/synchronization/atomic_flag.h
+++ b/chromium/base/synchronization/atomic_flag.h
@@ -34,7 +34,7 @@ class BASE_EXPORT AtomicFlag {
private:
base::subtle::Atomic32 flag_ = 0;
- SequenceChecker set_sequence_checker_;
+ SEQUENCE_CHECKER(set_sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
};
diff --git a/chromium/base/synchronization/condition_variable_unittest.cc b/chromium/base/synchronization/condition_variable_unittest.cc
index 705257a7076..d66aecc491f 100644
--- a/chromium/base/synchronization/condition_variable_unittest.cc
+++ b/chromium/base/synchronization/condition_variable_unittest.cc
@@ -194,7 +194,7 @@ TEST_F(ConditionVariableTest, TimeoutTest) {
lock.Release();
}
-#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
const int kDiscontinuitySeconds = 2;
void BackInTime(Lock* lock) {
@@ -245,10 +245,10 @@ TEST_F(ConditionVariableTest, DISABLED_TimeoutAcrossSetTimeOfDay) {
}
#endif
-
// Suddenly got flaky on Win, see http://crbug.com/10607 (starting at
// comment #15).
-#if defined(OS_WIN)
+// This is also flaky on Fuchsia, see http://crbug.com/738275.
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
#define MAYBE_MultiThreadConsumerTest DISABLED_MultiThreadConsumerTest
#else
#define MAYBE_MultiThreadConsumerTest MultiThreadConsumerTest
@@ -398,7 +398,13 @@ TEST_F(ConditionVariableTest, MAYBE_MultiThreadConsumerTest) {
queue.ThreadSafeCheckShutdown(kThreadCount));
}
-TEST_F(ConditionVariableTest, LargeFastTaskTest) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/751894): This flakily times out on Fuchsia.
+#define MAYBE_LargeFastTaskTest DISABLED_LargeFastTaskTest
+#else
+#define MAYBE_LargeFastTaskTest LargeFastTaskTest
+#endif
+TEST_F(ConditionVariableTest, MAYBE_LargeFastTaskTest) {
const int kThreadCount = 200;
WorkQueue queue(kThreadCount); // Start the threads.
diff --git a/chromium/base/synchronization/waitable_event_mac.cc b/chromium/base/synchronization/waitable_event_mac.cc
index ad6f8cb0024..2daefb2aeec 100644
--- a/chromium/base/synchronization/waitable_event_mac.cc
+++ b/chromium/base/synchronization/waitable_event_mac.cc
@@ -116,23 +116,13 @@ bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
// Record the event that this thread is blocking upon (for hang diagnosis).
debug::ScopedEventWaitActivity event_activity(this);
- TimeDelta wait_time = end_time - TimeTicks::Now();
- if (wait_time < TimeDelta()) {
- // A negative delta would be treated by the system as indefinite, but
- // it needs to be treated as a poll instead.
- wait_time = TimeDelta();
- }
-
mach_msg_empty_rcv_t msg{};
msg.header.msgh_local_port = receive_right_->Name();
mach_msg_option_t options = MACH_RCV_MSG;
- mach_msg_timeout_t timeout = 0;
- if (!end_time.is_max()) {
- options |= MACH_RCV_TIMEOUT;
- timeout = wait_time.InMillisecondsRoundedUp();
- }
+ if (!end_time.is_max())
+ options |= MACH_RCV_TIMEOUT | MACH_RCV_INTERRUPT;
mach_msg_size_t rcv_size = sizeof(msg);
if (policy_ == ResetPolicy::MANUAL) {
@@ -142,8 +132,22 @@ bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
rcv_size = 0;
}
- kern_return_t kr = mach_msg(&msg.header, options, 0, rcv_size,
- receive_right_->Name(), timeout, MACH_PORT_NULL);
+ kern_return_t kr;
+ mach_msg_timeout_t timeout = MACH_MSG_TIMEOUT_NONE;
+ do {
+ if (!end_time.is_max()) {
+ timeout = std::max<int64_t>(
+ 0, (end_time - TimeTicks::Now()).InMillisecondsRoundedUp());
+ }
+ kr = mach_msg(&msg.header, options, 0, rcv_size, receive_right_->Name(),
+ timeout, MACH_PORT_NULL);
+ // If the thread is interrupted during mach_msg(), the system call
+ // will be restarted. However, the libsyscall wrapper does not adjust
+ // the timeout by the amount of time already waited.
+ // Using MACH_RCV_INTERRUPT will instead return from mach_msg(),
+ // so that the call can be retried with an adjusted timeout.
+ } while (kr == MACH_RCV_INTERRUPTED);
+
if (kr == KERN_SUCCESS) {
return true;
} else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index 6e58715bf5e..2e366e3b6cd 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -131,12 +131,6 @@ class BASE_EXPORT SysInfo {
// details.
static std::string GetLsbReleaseBoard();
- // DEPRECATED: Please see GetLsbReleaseBoard's comment.
- // Convenience function for GetLsbReleaseBoard() removing trailing "-signed-*"
- // if present. Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
- // TODO(derat): Delete this after October 2017.
- static std::string GetStrippedReleaseBoard();
-
// Returns the creation time of /etc/lsb-release. (Used to get the date and
// time of the Chrome OS build).
static Time GetLsbReleaseTime();
@@ -147,6 +141,9 @@ class BASE_EXPORT SysInfo {
// Test method to force re-parsing of lsb-release.
static void SetChromeOSVersionInfoForTest(const std::string& lsb_release,
const Time& lsb_release_time);
+
+ // Returns the kernel version of the host operating system.
+ static std::string KernelVersion();
#endif // defined(OS_CHROMEOS)
#if defined(OS_ANDROID)
diff --git a/chromium/base/sys_info_chromeos.cc b/chromium/base/sys_info_chromeos.cc
index b9ec2c99059..5e834bff6fd 100644
--- a/chromium/base/sys_info_chromeos.cc
+++ b/chromium/base/sys_info_chromeos.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <sys/utsname.h>
#include "base/environment.h"
#include "base/files/file.h"
@@ -18,6 +19,7 @@
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
namespace base {
@@ -178,6 +180,23 @@ void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
}
// static
+std::string SysInfo::OperatingSystemVersion() {
+ int32_t major, minor, bugfix;
+ GetChromeOSVersionInfo().GetVersionNumbers(&major, &minor, &bugfix);
+ return base::StringPrintf("%d.%d.%d", major, minor, bugfix);
+}
+
+// static
+std::string SysInfo::KernelVersion() {
+ struct utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ return std::string();
+ }
+ return std::string(info.release);
+}
+
+// static
const SysInfo::LsbReleaseMap& SysInfo::GetLsbReleaseMap() {
return GetChromeOSVersionInfo().lsb_release_map();
}
@@ -197,16 +216,6 @@ std::string SysInfo::GetLsbReleaseBoard() {
}
// static
-std::string SysInfo::GetStrippedReleaseBoard() {
- std::string board = GetLsbReleaseBoard();
- const size_t index = board.find("-signed-");
- if (index != std::string::npos)
- board.resize(index);
-
- return base::ToLowerASCII(board);
-}
-
-// static
Time SysInfo::GetLsbReleaseTime() {
return GetChromeOSVersionInfo().lsb_release_time();
}
diff --git a/chromium/base/sys_info_posix.cc b/chromium/base/sys_info_posix.cc
index f6fcd10ffff..464c11a5e4d 100644
--- a/chromium/base/sys_info_posix.cc
+++ b/chromium/base/sys_info_posix.cc
@@ -178,7 +178,7 @@ std::string SysInfo::OperatingSystemName() {
}
#endif
-#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
// static
std::string SysInfo::OperatingSystemVersion() {
struct utsname info;
diff --git a/chromium/base/sys_info_unittest.cc b/chromium/base/sys_info_unittest.cc
index e97ab57d305..4bd5558da09 100644
--- a/chromium/base/sys_info_unittest.cc
+++ b/chromium/base/sys_info_unittest.cc
@@ -65,14 +65,28 @@ TEST_F(SysInfoTest, MAYBE_AmountOfAvailablePhysicalMemory) {
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851734): Implementation depends on statvfs, which is not
+// implemented on Fuchsia
+#define MAYBE_AmountOfFreeDiskSpace DISABLED_AmountOfFreeDiskSpace
+#else
+#define MAYBE_AmountOfFreeDiskSpace AmountOfFreeDiskSpace
+#endif
+TEST_F(SysInfoTest, MAYBE_AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
ASSERT_TRUE(GetTempDir(&tmp_path));
EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
}
-TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851734): Implementation depends on statvfs, which is not
+// implemented on Fuchsia
+#define MAYBE_AmountOfTotalDiskSpace DISABLED_AmountOfTotalDiskSpace
+#else
+#define MAYBE_AmountOfTotalDiskSpace AmountOfTotalDiskSpace
+#endif
+TEST_F(SysInfoTest, MAYBE_AmountOfTotalDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
ASSERT_TRUE(GetTempDir(&tmp_path));
@@ -197,16 +211,6 @@ TEST_F(SysInfoTest, IsRunningOnChromeOS) {
EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
}
-TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
- const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
- SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
- EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
-
- const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
- SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
- EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
-}
-
#endif // OS_CHROMEOS
} // namespace base
diff --git a/chromium/base/syslog_logging.cc b/chromium/base/syslog_logging.cc
index 03c2b5ea3e3..53bc1aaab62 100644
--- a/chromium/base/syslog_logging.cc
+++ b/chromium/base/syslog_logging.cc
@@ -10,9 +10,10 @@
#include "base/callback_helpers.h"
#include "base/debug/stack_trace.h"
#elif defined(OS_LINUX)
-// <syslog.h> defines a LOG_WARNING macro that could conflict with
-// base::LOG_WARNING.
+// <syslog.h> defines LOG_INFO, LOG_WARNING macros that could conflict with
+// base::LOG_INFO, base::LOG_WARNING.
#include <syslog.h>
+#undef LOG_INFO
#undef LOG_WARNING
#endif
diff --git a/chromium/base/task/sequence_manager/enqueue_order.cc b/chromium/base/task/sequence_manager/enqueue_order.cc
new file mode 100644
index 00000000000..066ef0382ee
--- /dev/null
+++ b/chromium/base/task/sequence_manager/enqueue_order.cc
@@ -0,0 +1,17 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/enqueue_order.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+EnqueueOrder::Generator::Generator() : counter_(kFirst) {}
+
+EnqueueOrder::Generator::~Generator() = default;
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/enqueue_order.h b/chromium/base/task/sequence_manager/enqueue_order.h
new file mode 100644
index 00000000000..fac1d179b04
--- /dev/null
+++ b/chromium/base/task/sequence_manager/enqueue_order.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// 64-bit number which is used to order tasks.
+// SequenceManager assumes this number will never overflow.
+class EnqueueOrder {
+ public:
+ EnqueueOrder() : value_(kNone) {}
+ ~EnqueueOrder() = default;
+
+ static EnqueueOrder none() { return EnqueueOrder(kNone); }
+ static EnqueueOrder blocking_fence() { return EnqueueOrder(kBlockingFence); }
+
+  // It's okay to use EnqueueOrder in boolean expressions, keeping in mind
+ // that some non-zero values have a special meaning.
+ operator uint64_t() const { return value_; }
+
+ static EnqueueOrder FromIntForTesting(uint64_t value) {
+ return EnqueueOrder(value);
+ }
+
+  // EnqueueOrder can't be created from a raw number in non-test code.
+  // Generator is used to create it with a strictly monotonic guarantee.
+ class BASE_EXPORT Generator {
+ public:
+ Generator();
+ ~Generator();
+
+ // Can be called from any thread.
+ EnqueueOrder GenerateNext() {
+ return EnqueueOrder(std::atomic_fetch_add_explicit(
+ &counter_, uint64_t(1), std::memory_order_relaxed));
+ }
+
+ private:
+ std::atomic<uint64_t> counter_;
+ DISALLOW_COPY_AND_ASSIGN(Generator);
+ };
+
+ private:
+ explicit EnqueueOrder(uint64_t value) : value_(value) {}
+
+ enum SpecialValues : uint64_t {
+ kNone = 0,
+ kBlockingFence = 1,
+ kFirst = 2,
+ };
+
+ uint64_t value_;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
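A minimal usage sketch, not part of the patch above; the wrapping function is
invented purely for illustration:

#include "base/logging.h"
#include "base/task/sequence_manager/enqueue_order.h"

namespace base {
namespace sequence_manager {
namespace internal {

void EnqueueOrderExample() {
  EnqueueOrder::Generator generator;

  // GenerateNext() may be called from any thread; each call yields a strictly
  // larger value, starting at 2 because 0 (none) and 1 (blocking fence) are
  // reserved.
  EnqueueOrder first = generator.GenerateNext();
  EnqueueOrder second = generator.GenerateNext();
  DCHECK_LT(static_cast<uint64_t>(first), static_cast<uint64_t>(second));

  // The implicit uint64_t conversion makes boolean tests read naturally.
  DCHECK(first);
  DCHECK(!EnqueueOrder::none());
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base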
diff --git a/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.cc b/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.cc
new file mode 100644
index 00000000000..9a8c893e930
--- /dev/null
+++ b/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.cc
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/graceful_queue_shutdown_helper.h"
+
+#include "base/task/sequence_manager/task_queue_impl.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+GracefulQueueShutdownHelper::GracefulQueueShutdownHelper()
+ : sequence_manager_deleted_(false) {}
+
+GracefulQueueShutdownHelper::~GracefulQueueShutdownHelper() = default;
+
+void GracefulQueueShutdownHelper::GracefullyShutdownTaskQueue(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+ AutoLock lock(lock_);
+ if (sequence_manager_deleted_)
+ return;
+ queues_.push_back(std::move(task_queue));
+}
+
+void GracefulQueueShutdownHelper::OnSequenceManagerDeleted() {
+ AutoLock lock(lock_);
+ sequence_manager_deleted_ = true;
+ queues_.clear();
+}
+
+std::vector<std::unique_ptr<internal::TaskQueueImpl>>
+GracefulQueueShutdownHelper::TakeQueues() {
+ AutoLock lock(lock_);
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> result;
+ result.swap(queues_);
+ return result;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.h b/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.h
new file mode 100644
index 00000000000..108eb827b28
--- /dev/null
+++ b/chromium/base/task/sequence_manager/graceful_queue_shutdown_helper.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class TaskQueueImpl;
+
+// Thread-safe helper to shutdown queues from any thread.
+class GracefulQueueShutdownHelper
+ : public RefCountedThreadSafe<GracefulQueueShutdownHelper> {
+ public:
+ GracefulQueueShutdownHelper();
+
+ void GracefullyShutdownTaskQueue(
+ std::unique_ptr<internal::TaskQueueImpl> queue);
+
+ void OnSequenceManagerDeleted();
+
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> TakeQueues();
+
+ private:
+ // This class is ref-counted so it controls its own lifetime.
+ ~GracefulQueueShutdownHelper();
+ friend class RefCountedThreadSafe<GracefulQueueShutdownHelper>;
+
+ Lock lock_;
+ bool sequence_manager_deleted_;
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues_;
+
+ DISALLOW_COPY_AND_ASSIGN(GracefulQueueShutdownHelper);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
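A sketch of the intended calling pattern, not part of the patch; how the
TaskQueueImpl passed in is created is elided, and the function below exists
only for illustration:

#include <memory>
#include <utility>
#include <vector>

#include "base/memory/ref_counted.h"
#include "base/task/sequence_manager/graceful_queue_shutdown_helper.h"
#include "base/task/sequence_manager/task_queue_impl.h"

namespace base {
namespace sequence_manager {
namespace internal {

void GracefulShutdownExample(std::unique_ptr<TaskQueueImpl> queue) {
  scoped_refptr<GracefulQueueShutdownHelper> helper =
      MakeRefCounted<GracefulQueueShutdownHelper>();

  // Any thread: hand the queue over; the helper keeps it alive until the
  // owner (normally SequenceManagerImpl) collects it.
  helper->GracefullyShutdownTaskQueue(std::move(queue));

  // Main thread, periodically: reclaim the queues. They are destroyed when
  // the returned vector goes out of scope.
  std::vector<std::unique_ptr<TaskQueueImpl>> queues = helper->TakeQueues();

  // Once the SequenceManager itself is gone, anything handed over afterwards
  // is dropped immediately rather than accumulated.
  helper->OnSequenceManagerDeleted();
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base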
diff --git a/chromium/base/task/sequence_manager/intrusive_heap.h b/chromium/base/task/sequence_manager/intrusive_heap.h
new file mode 100644
index 00000000000..eb2fc8a454b
--- /dev/null
+++ b/chromium/base/task/sequence_manager/intrusive_heap.h
@@ -0,0 +1,229 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
+#define BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+template <typename T>
+class IntrusiveHeap;
+
+// Intended as an opaque wrapper around |index_|.
+class HeapHandle {
+ public:
+ HeapHandle() : index_(0u) {}
+
+ bool IsValid() const { return index_ != 0u; }
+
+ private:
+ template <typename T>
+ friend class IntrusiveHeap;
+
+ HeapHandle(size_t index) : index_(index) {}
+
+ size_t index_;
+};
+
+// A standard min-heap with the following assumptions:
+// 1. T has operator <=
+// 2. T has method void SetHeapHandle(HeapHandle handle)
+// 3. T has method void ClearHeapHandle()
+// 4. T is moveable
+// 5. T is default constructible
+// 6. The heap size never gets terribly big so reclaiming memory on pop/erase
+// isn't a priority.
+//
+// The reason IntrusiveHeap exists is to provide similar performance to
+// std::priority_queue while allowing removal of arbitrary elements.
+template <typename T>
+class IntrusiveHeap {
+ public:
+ IntrusiveHeap() : nodes_(kMinimumHeapSize), size_(0) {}
+
+ ~IntrusiveHeap() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ }
+
+ bool empty() const { return size_ == 0; }
+
+ size_t size() const { return size_; }
+
+ void Clear() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ nodes_.resize(kMinimumHeapSize);
+ size_ = 0;
+ }
+
+ const T& Min() const {
+ DCHECK_GE(size_, 1u);
+ return nodes_[1];
+ }
+
+ void Pop() {
+ DCHECK_GE(size_, 1u);
+ MakeHole(1u);
+ size_t top_index = size_--;
+ if (!empty())
+ MoveHoleDownAndFillWithLeafElement(1u, std::move(nodes_[top_index]));
+ }
+
+ void insert(T&& element) {
+ size_++;
+ if (size_ >= nodes_.size())
+ nodes_.resize(nodes_.size() * 2);
+    // Notionally we have a hole in the tree at index |size_|; move this up
+ // to find the right insertion point.
+ MoveHoleUpAndFillWithElement(size_, std::move(element));
+ }
+
+ void erase(HeapHandle handle) {
+ DCHECK_GT(handle.index_, 0u);
+ DCHECK_LE(handle.index_, size_);
+ MakeHole(handle.index_);
+ size_t top_index = size_--;
+ if (empty() || top_index == handle.index_)
+ return;
+ if (nodes_[handle.index_] <= nodes_[top_index]) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_,
+ std::move(nodes_[top_index]));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(nodes_[top_index]));
+ }
+ }
+
+ void ReplaceMin(T&& element) {
+ // Note |element| might not be a leaf node so we can't use
+ // MoveHoleDownAndFillWithLeafElement.
+ MoveHoleDownAndFillWithElement(1u, std::move(element));
+ }
+
+ void ChangeKey(HeapHandle handle, T&& element) {
+ if (nodes_[handle.index_] <= element) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_, std::move(element));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(element));
+ }
+ }
+
+  // Caution: mutating the heap invalidates the iterators.
+ const T* begin() const { return &nodes_[1u]; }
+ const T* end() const { return begin() + size_; }
+
+ private:
+ enum {
+ // The majority of sets in the scheduler have 0-3 items in them (a few will
+ // have perhaps up to 100), so this means we usually only have to allocate
+ // memory once.
+ kMinimumHeapSize = 4u
+ };
+
+ friend class IntrusiveHeapTest;
+
+ size_t MoveHole(size_t new_hole_pos, size_t old_hole_pos) {
+ DCHECK_GT(new_hole_pos, 0u);
+ DCHECK_LE(new_hole_pos, size_);
+    DCHECK_GT(old_hole_pos, 0u);
+    DCHECK_LE(old_hole_pos, size_);
+ DCHECK_NE(old_hole_pos, new_hole_pos);
+ nodes_[old_hole_pos] = std::move(nodes_[new_hole_pos]);
+ nodes_[old_hole_pos].SetHeapHandle(HeapHandle(old_hole_pos));
+ return new_hole_pos;
+ }
+
+ // Notionally creates a hole in the tree at |index|.
+ void MakeHole(size_t index) {
+ DCHECK_GT(index, 0u);
+ DCHECK_LE(index, size_);
+ nodes_[index].ClearHeapHandle();
+ }
+
+ void FillHole(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ nodes_[hole] = std::move(element);
+ nodes_[hole].SetHeapHandle(HeapHandle(hole));
+ DCHECK(std::is_heap(begin(), end(), CompareNodes));
+ }
+
+ // is_heap requires a strict comparator.
+ static bool CompareNodes(const T& a, const T& b) { return !(a <= b); }
+
+ // Moves the |hole| up the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleUpAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ while (hole >= 2u) {
+ size_t parent_pos = hole / 2;
+ if (nodes_[parent_pos] <= element)
+ break;
+
+ hole = MoveHole(parent_pos, hole);
+ }
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleDownAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ if (nodes_[child_pos + 1] <= nodes_[child_pos])
+ child_pos++;
+
+ if (element <= nodes_[child_pos])
+ break;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_ && !(element <= nodes_[child_pos]))
+ hole = MoveHole(child_pos, hole);
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |leaf_element| is moved in. Faster than MoveHoleDownAndFillWithElement
+ // (it does one key comparison per level instead of two) but only valid for
+ // leaf elements (i.e. one of the max values).
+ void MoveHoleDownAndFillWithLeafElement(size_t hole, T&& leaf_element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ size_t second_child = child_pos + 1;
+ if (nodes_[second_child] <= nodes_[child_pos])
+ child_pos = second_child;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_)
+ hole = MoveHole(child_pos, hole);
+ MoveHoleUpAndFillWithElement(hole, std::move(leaf_element));
+ }
+
+ std::vector<T> nodes_; // NOTE we use 1-based indexing
+ size_t size_;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
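A compact sketch, not part of the patch, of an element type that satisfies the
requirements listed above and of handle-based erasure (the same pattern the
unit test below exercises); the Job struct and the function are illustrative
only:

#include "base/logging.h"
#include "base/task/sequence_manager/intrusive_heap.h"

namespace base {
namespace sequence_manager {
namespace internal {

// Provides operator<=, SetHeapHandle, ClearHeapHandle, and is movable and
// default constructible, as the heap requires.
struct Job {
  int priority;
  HeapHandle* where;  // Kept up to date by the heap as the element moves.

  bool operator<=(const Job& other) const {
    return priority <= other.priority;
  }
  void SetHeapHandle(HeapHandle h) {
    if (where)
      *where = h;
  }
  void ClearHeapHandle() {
    if (where)
      *where = HeapHandle();
  }
};

void IntrusiveHeapExample() {
  IntrusiveHeap<Job> heap;
  HeapHandle handle_for_seven;
  heap.insert({3, nullptr});
  heap.insert({7, &handle_for_seven});
  heap.insert({5, nullptr});

  // The capability std::priority_queue lacks: O(log n) removal of an
  // arbitrary element via the handle the heap keeps current.
  heap.erase(handle_for_seven);
  DCHECK_EQ(3, heap.Min().priority);
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base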
diff --git a/chromium/base/task/sequence_manager/intrusive_heap_unittest.cc b/chromium/base/task/sequence_manager/intrusive_heap_unittest.cc
new file mode 100644
index 00000000000..3c1323a76f6
--- /dev/null
+++ b/chromium/base/task/sequence_manager/intrusive_heap_unittest.cc
@@ -0,0 +1,378 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+struct TestElement {
+ int key;
+ HeapHandle* handle;
+
+ bool operator<=(const TestElement& other) const { return key <= other.key; }
+
+ void SetHeapHandle(HeapHandle h) {
+ if (handle)
+ *handle = h;
+ }
+
+ void ClearHeapHandle() {
+ if (handle)
+ *handle = HeapHandle();
+ }
+};
+
+} // namespace
+
+class IntrusiveHeapTest : public testing::Test {
+ protected:
+ static bool CompareNodes(const TestElement& a, const TestElement& b) {
+ return IntrusiveHeap<TestElement>::CompareNodes(a, b);
+ }
+};
+
+TEST_F(IntrusiveHeapTest, Basic) {
+ IntrusiveHeap<TestElement> heap;
+
+ EXPECT_TRUE(heap.empty());
+ EXPECT_EQ(0u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, Clear) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+
+ heap.insert({11, &index1});
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+
+ heap.Clear();
+ EXPECT_EQ(0u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, Destructor) {
+ HeapHandle index1;
+
+ {
+ IntrusiveHeap<TestElement> heap;
+
+ heap.insert({11, &index1});
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+ }
+
+ EXPECT_FALSE(index1.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, Min) {
+ IntrusiveHeap<TestElement> heap;
+
+ heap.insert({9, nullptr});
+ heap.insert({10, nullptr});
+ heap.insert({8, nullptr});
+ heap.insert({2, nullptr});
+ heap.insert({7, nullptr});
+ heap.insert({15, nullptr});
+ heap.insert({22, nullptr});
+ heap.insert({3, nullptr});
+
+ EXPECT_FALSE(heap.empty());
+ EXPECT_EQ(8u, heap.size());
+ EXPECT_EQ(2, heap.Min().key);
+}
+
+TEST_F(IntrusiveHeapTest, InsertAscending) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+
+ for (int i = 0; i < 50; i++)
+ heap.insert({i, nullptr});
+
+ EXPECT_EQ(0, heap.Min().key);
+ EXPECT_EQ(50u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, InsertDescending) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 50; i++)
+ heap.insert({50 - i, nullptr});
+
+ EXPECT_EQ(1, heap.Min().key);
+ EXPECT_EQ(50u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, HeapIndex) {
+ HeapHandle index5;
+ HeapHandle index4;
+ HeapHandle index3;
+ HeapHandle index2;
+ HeapHandle index1;
+ IntrusiveHeap<TestElement> heap;
+
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_FALSE(index2.IsValid());
+ EXPECT_FALSE(index3.IsValid());
+ EXPECT_FALSE(index4.IsValid());
+ EXPECT_FALSE(index5.IsValid());
+
+ heap.insert({15, &index5});
+ heap.insert({14, &index4});
+ heap.insert({13, &index3});
+ heap.insert({12, &index2});
+ heap.insert({11, &index1});
+
+ EXPECT_TRUE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+ EXPECT_TRUE(index3.IsValid());
+ EXPECT_TRUE(index4.IsValid());
+ EXPECT_TRUE(index5.IsValid());
+
+ EXPECT_FALSE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, Pop) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+ HeapHandle index2;
+
+ heap.insert({11, &index1});
+ heap.insert({12, &index2});
+ EXPECT_EQ(2u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+
+ heap.Pop();
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+
+ heap.Pop();
+ EXPECT_EQ(0u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_FALSE(index2.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, PopMany) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 500; i++)
+ heap.insert({i, nullptr});
+
+ EXPECT_FALSE(heap.empty());
+ EXPECT_EQ(500u, heap.size());
+ for (int i = 0; i < 500; i++) {
+ EXPECT_EQ(i, heap.Min().key);
+ heap.Pop();
+ }
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, Erase) {
+ IntrusiveHeap<TestElement> heap;
+
+ HeapHandle index12;
+
+ heap.insert({15, nullptr});
+ heap.insert({14, nullptr});
+ heap.insert({13, nullptr});
+ heap.insert({12, &index12});
+ heap.insert({11, nullptr});
+
+ EXPECT_EQ(5u, heap.size());
+ EXPECT_TRUE(index12.IsValid());
+ heap.erase(index12);
+ EXPECT_EQ(4u, heap.size());
+ EXPECT_FALSE(index12.IsValid());
+
+ EXPECT_EQ(11, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(13, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(14, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(15, heap.Min().key);
+ heap.Pop();
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMin) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 500; i++)
+ heap.insert({500 - i, nullptr});
+
+ EXPECT_EQ(1, heap.Min().key);
+
+ for (int i = 0; i < 500; i++)
+ heap.ReplaceMin({1000 + i, nullptr});
+
+ EXPECT_EQ(1000, heap.Min().key);
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMinWithNonLeafNode) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 50; i++) {
+ heap.insert({i, nullptr});
+ heap.insert({200 + i, nullptr});
+ }
+
+ EXPECT_EQ(0, heap.Min().key);
+
+ for (int i = 0; i < 50; i++)
+ heap.ReplaceMin({100 + i, nullptr});
+
+ for (int i = 0; i < 50; i++) {
+ EXPECT_EQ((100 + i), heap.Min().key);
+ heap.Pop();
+ }
+ for (int i = 0; i < 50; i++) {
+ EXPECT_EQ((200 + i), heap.Min().key);
+ heap.Pop();
+ }
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMinCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ReplaceMin({j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 0);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyUp) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {17, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 12, 14, 16, 17, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyUpButDoesntMove) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {11, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 11, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyDown) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {1, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 1, 2, 4, 6, 8, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyDownButDoesntMove) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {9, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 9, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[40], {j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 80);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
+TEST_F(IntrusiveHeapTest, CompareNodes) {
+ TestElement five{5, nullptr}, six{6, nullptr};
+
+ // Check that we have a strict comparator, otherwise std::is_heap()
+ // (used in DCHECK) may fail. See http://crbug.com/661080.
+ EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(six, six));
+
+ EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(five, six));
+ EXPECT_TRUE(IntrusiveHeapTest::CompareNodes(six, five));
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/lazily_deallocated_deque.h b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h
new file mode 100644
index 00000000000..7a4d7bad6a5
--- /dev/null
+++ b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h
@@ -0,0 +1,364 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
+// patterns. The queue generally grows while tasks are added and then removed
+// until empty and the cycle repeats.
+//
+// The main difference between sequence_manager::LazilyDeallocatedDeque and
+// others is memory management. For performance (memory allocation isn't free)
+// we don't automatically reclaim memory when the queue becomes empty.
+// Instead we rely on the surrounding code periodically calling
+// MaybeShrinkQueue, ideally when the queue is empty.
+//
+// We keep track of the maximum recent queue size and rate limit
+// MaybeShrinkQueue to avoid unnecessary churn.
+//
+// NB this queue isn't by itself thread safe.
+template <typename T>
+class LazilyDeallocatedDeque {
+ public:
+ enum {
+ // Minimum allocation for a ring. Note a ring of size 4 will only hold up to
+ // 3 elements.
+ kMinimumRingSize = 4,
+
+ // Maximum "wasted" capacity allowed when considering if we should resize
+ // the backing store.
+ kReclaimThreshold = 16,
+
+ // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
+ // queue.
+ kMinimumShrinkIntervalInSeconds = 5
+ };
+
+ LazilyDeallocatedDeque() {}
+
+ ~LazilyDeallocatedDeque() { clear(); }
+
+ bool empty() const { return size_ == 0; }
+
+ size_t max_size() const { return max_size_; }
+
+ size_t size() const { return size_; }
+
+ size_t capacity() const {
+ size_t capacity = 0;
+ for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
+ capacity += iter->capacity();
+ }
+ return capacity;
+ }
+
+ void clear() {
+ while (head_) {
+ head_ = std::move(head_->next_);
+ }
+
+ tail_ = nullptr;
+ size_ = 0;
+ }
+
+ // Assumed to be an uncommon operation.
+ void push_front(T t) {
+ if (!head_) {
+ head_ = std::make_unique<Ring>(kMinimumRingSize);
+ tail_ = head_.get();
+ }
+
+ // Grow if needed, by the minimum amount.
+ if (!head_->CanPush()) {
+ std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
+ new_ring->next_ = std::move(head_);
+ head_ = std::move(new_ring);
+ }
+
+ head_->push_front(std::move(t));
+ max_size_ = std::max(max_size_, ++size_);
+ }
+
+ // Assumed to be a common operation.
+ void push_back(T t) {
+ if (!head_) {
+ head_ = std::make_unique<Ring>(kMinimumRingSize);
+ tail_ = head_.get();
+ }
+
+ // Grow if needed.
+ if (!tail_->CanPush()) {
+ tail_->next_ = std::make_unique<Ring>(tail_->capacity() * 2);
+ tail_ = tail_->next_.get();
+ }
+
+ tail_->push_back(std::move(t));
+ max_size_ = std::max(max_size_, ++size_);
+ }
+
+ T& front() {
+ DCHECK(head_);
+ return head_->front();
+ }
+
+ const T& front() const {
+ DCHECK(head_);
+ return head_->front();
+ }
+
+ T& back() {
+ DCHECK(tail_);
+ return tail_->back();
+ }
+
+ const T& back() const {
+ DCHECK(tail_);
+ return tail_->back();
+ }
+
+ void pop_front() {
+ DCHECK(tail_);
+ DCHECK_GT(size_, 0u);
+ head_->pop_front();
+
+    // If the ring has become empty and we have several rings, then remove the
+    // head one (which we expect to have lower capacity than the remaining
+    // ones).
+ if (head_->empty() && head_->next_) {
+ head_ = std::move(head_->next_);
+ }
+
+ --size_;
+ }
+
+ void swap(LazilyDeallocatedDeque& other) {
+ std::swap(head_, other.head_);
+ std::swap(tail_, other.tail_);
+ std::swap(size_, other.size_);
+ std::swap(max_size_, other.max_size_);
+ std::swap(next_resize_time_, other.next_resize_time_);
+ }
+
+ void MaybeShrinkQueue() {
+ if (!tail_)
+ return;
+
+ DCHECK_GE(max_size_, size_);
+
+ // Rate limit how often we shrink the queue because it's somewhat expensive.
+ TimeTicks current_time = TimeTicks::Now();
+ if (current_time < next_resize_time_)
+ return;
+
+ // Due to the way the Ring works we need 1 more slot than is used.
+ size_t new_capacity = max_size_ + 1;
+ if (new_capacity < kMinimumRingSize)
+ new_capacity = kMinimumRingSize;
+
+ // Reset |max_size_| so that unless usage has spiked up we will consider
+ // reclaiming it next time.
+ max_size_ = size_;
+
+    // Only realloc if the current capacity is sufficiently greater than the
+    // observed maximum size for the previous period.
+ if (new_capacity + kReclaimThreshold >= capacity())
+ return;
+
+ SetCapacity(new_capacity);
+ next_resize_time_ =
+ current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
+ }
+
+ void SetCapacity(size_t new_capacity) {
+ std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);
+
+ DCHECK_GE(new_capacity, size_ + 1);
+
+ // Preserve the |size_| which counts down to zero in the while loop.
+ size_t real_size = size_;
+
+ while (!empty()) {
+ DCHECK(new_ring->CanPush());
+ new_ring->push_back(std::move(head_->front()));
+ pop_front();
+ }
+
+ size_ = real_size;
+
+ DCHECK_EQ(head_.get(), tail_);
+ head_ = std::move(new_ring);
+ tail_ = head_.get();
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);
+
+ struct Ring {
+ explicit Ring(size_t capacity)
+ : capacity_(capacity),
+ front_index_(0),
+ back_index_(0),
+ data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
+ next_(nullptr) {
+ DCHECK_GE(capacity_, kMinimumRingSize);
+ }
+
+ ~Ring() {
+ while (!empty()) {
+ pop_front();
+ }
+ delete[] reinterpret_cast<char*>(data_);
+ }
+
+ bool empty() const { return back_index_ == front_index_; }
+
+ size_t capacity() const { return capacity_; }
+
+ bool CanPush() const {
+ return front_index_ != CircularIncrement(back_index_);
+ }
+
+ void push_front(T&& t) {
+ // Mustn't appear to become empty.
+ DCHECK_NE(CircularDecrement(front_index_), back_index_);
+ new (&data_[front_index_]) T(std::move(t));
+ front_index_ = CircularDecrement(front_index_);
+ }
+
+ void push_back(T&& t) {
+ back_index_ = CircularIncrement(back_index_);
+ DCHECK(!empty()); // Mustn't appear to become empty.
+ new (&data_[back_index_]) T(std::move(t));
+ }
+
+ bool CanPop() const { return front_index_ != back_index_; }
+
+ void pop_front() {
+ DCHECK(!empty());
+ front_index_ = CircularIncrement(front_index_);
+ data_[front_index_].~T();
+ }
+
+ T& front() {
+ DCHECK(!empty());
+ return data_[CircularIncrement(front_index_)];
+ }
+
+ const T& front() const {
+ DCHECK(!empty());
+ return data_[CircularIncrement(front_index_)];
+ }
+
+ T& back() {
+ DCHECK(!empty());
+ return data_[back_index_];
+ }
+
+ const T& back() const {
+ DCHECK(!empty());
+ return data_[back_index_];
+ }
+
+ size_t CircularDecrement(size_t index) const {
+ if (index == 0)
+ return capacity_ - 1;
+ return index - 1;
+ }
+
+ size_t CircularIncrement(size_t index) const {
+ DCHECK_LT(index, capacity_);
+ ++index;
+ if (index == capacity_)
+ return 0;
+ return index;
+ }
+
+ size_t capacity_;
+ size_t front_index_;
+ size_t back_index_;
+ T* data_;
+ std::unique_ptr<Ring> next_;
+
+ DISALLOW_COPY_AND_ASSIGN(Ring);
+ };
+
+ public:
+ class Iterator {
+ public:
+ using value_type = T;
+ using pointer = const T*;
+ using reference = const T&;
+
+    const T* operator->() const { return &ring_->data_[index_]; }
+ const T& operator*() const { return ring_->data_[index_]; }
+
+ Iterator& operator++() {
+      if (index_ == ring_->back_index_) {
+        // Step into the next ring (if any). Its first element sits one past
+        // its |front_index_|, matching how the constructor positions |index_|.
+        ring_ = ring_->next_.get();
+        index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
+ } else {
+ index_ = ring_->CircularIncrement(index_);
+ }
+ return *this;
+ }
+
+ operator bool() const { return !!ring_; }
+
+ private:
+ explicit Iterator(const Ring* ring) {
+ if (!ring || ring->empty()) {
+ ring_ = nullptr;
+ index_ = 0;
+ return;
+ }
+
+ ring_ = ring;
+ index_ = ring_->CircularIncrement(ring->front_index_);
+ }
+
+ const Ring* ring_;
+ size_t index_;
+
+ friend class LazilyDeallocatedDeque;
+ };
+
+ Iterator begin() const { return Iterator(head_.get()); }
+
+ Iterator end() const { return Iterator(nullptr); }
+
+ private:
+ // We maintain a list of Ring buffers, to enable us to grow without copying,
+ // but most of the time we aim to have only one active Ring.
+ std::unique_ptr<Ring> head_;
+ Ring* tail_ = nullptr;
+
+ size_t size_ = 0;
+ size_t max_size_ = 0;
+ TimeTicks next_resize_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(LazilyDeallocatedDeque);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
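A short sketch of the usage cycle described in the comment at the top of this
file, not part of the patch; DrainQueue and RunTask are invented names for
illustration:

#include "base/task/sequence_manager/lazily_deallocated_deque.h"

namespace base {
namespace sequence_manager {
namespace internal {

void RunTask(int task) {
  (void)task;  // Stand-in for real work.
}

void DrainQueue(LazilyDeallocatedDeque<int>* queue) {
  // The common cycle: the queue grows while tasks are posted, then drains.
  while (!queue->empty()) {
    RunTask(queue->front());
    queue->pop_front();
  }

  // pop_front() never releases capacity. The owner asks for that explicitly,
  // ideally when the queue is empty; the call is rate limited internally
  // (kMinimumShrinkIntervalInSeconds) and keeps room for the recent maximum
  // size, so calling it after every drain is cheap.
  queue->MaybeShrinkQueue();
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base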
diff --git a/chromium/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc b/chromium/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc
new file mode 100644
index 00000000000..2afa048ac9d
--- /dev/null
+++ b/chromium/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc
@@ -0,0 +1,364 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/lazily_deallocated_deque.h"
+
+#include "base/time/time_override.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class LazilyDeallocatedDequeTest : public testing::Test {};
+
+TEST_F(LazilyDeallocatedDequeTest, InitiallyEmpty) {
+ LazilyDeallocatedDeque<int> d;
+
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndPopFront1) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_back(123);
+
+ EXPECT_FALSE(d.empty());
+ EXPECT_EQ(1u, d.size());
+
+ EXPECT_EQ(123, d.front());
+
+ d.pop_front();
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndPopFront1000) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+
+ EXPECT_EQ(0, d.front());
+ EXPECT_EQ(999, d.back());
+ EXPECT_EQ(1000u, d.size());
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(i, d.front());
+ d.pop_front();
+ }
+
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushFrontBackAndPopFront1) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_front(123);
+
+ EXPECT_FALSE(d.empty());
+ EXPECT_EQ(1u, d.size());
+
+ EXPECT_EQ(123, d.front());
+
+ d.pop_front();
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushFrontAndPopFront1000) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_front(i);
+ }
+
+ EXPECT_EQ(999, d.front());
+ EXPECT_EQ(0, d.back());
+ EXPECT_EQ(1000u, d.size());
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(999 - i, d.front());
+ d.pop_front();
+ }
+
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueWithLargeSizeDrop) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1000u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // Drop most elements.
+ for (int i = 0; i < 990; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(10u, d.size());
+ EXPECT_EQ(512u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // This won't do anything since the max size is greater than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(512u, d.capacity());
+ EXPECT_EQ(10u, d.max_size());
+
+ // This will shrink because the max size is now much less than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(11u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueWithSmallSizeDrop) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1010; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1010u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1010u, d.max_size());
+
+ // Drop a couple of elements.
+ d.pop_front();
+ d.pop_front();
+ EXPECT_EQ(1008u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1010u, d.max_size());
+
+  // This won't do anything since the max size is only slightly lower than the
+  // capacity.
+  d.MaybeShrinkQueue();
+  EXPECT_EQ(1008u, d.max_size());
+  EXPECT_EQ(1020u, d.capacity());
+
+ // Ditto. Nothing changed so no point shrinking.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(1008u, d.max_size());
+ EXPECT_EQ(1020u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueToEmpty) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_front(i);
+ }
+
+ for (int i = 0; i < 1000; i++) {
+ d.pop_front();
+ }
+
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(0u, d.max_size());
+ EXPECT_EQ(LazilyDeallocatedDeque<int>::kMinimumRingSize, d.capacity());
+}
+
+namespace {
+TimeTicks fake_now;
+}  // namespace
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueRateLimiting) {
+ subtle::ScopedTimeClockOverrides time_overrides(
+ nullptr, []() { return fake_now; }, nullptr);
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1000u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // Drop some elements.
+ for (int i = 0; i < 100; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(900u, d.size());
+ EXPECT_EQ(960u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // This won't do anything since the max size is greater than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(960u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // This will shrink to fit.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(901u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // Drop some more elements.
+ for (int i = 0; i < 100; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(800u, d.size());
+ EXPECT_EQ(901u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // Not enough time has passed so max_size is untouched and not shrunk.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(900u, d.max_size());
+ EXPECT_EQ(901u, d.capacity());
+
+ // After time passes we re-sample max_size.
+ fake_now += TimeDelta::FromSeconds(
+ LazilyDeallocatedDeque<int>::kMinimumShrinkIntervalInSeconds);
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(800u, d.max_size());
+ EXPECT_EQ(901u, d.capacity());
+
+  // And the next call to MaybeShrinkQueue actually shrinks the queue.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(800u, d.max_size());
+ EXPECT_EQ(801u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, Iterators) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_back(1);
+ d.push_back(2);
+ d.push_back(3);
+
+ auto iter = d.begin();
+ EXPECT_EQ(1, *iter);
+ EXPECT_NE(++iter, d.end());
+
+ EXPECT_EQ(2, *iter);
+ EXPECT_NE(++iter, d.end());
+
+ EXPECT_EQ(3, *iter);
+ EXPECT_EQ(++iter, d.end());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndFront) {
+ LazilyDeallocatedDeque<int> d;
+
+ int j = 1;
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_front(-i);
+ }
+
+ for (int i = -999; i < 4000; i++) {
+ EXPECT_EQ(d.front(), i);
+ d.pop_front();
+ }
+}
+
+TEST_F(LazilyDeallocatedDequeTest, SetCapacity) {
+ LazilyDeallocatedDeque<int> d;
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+
+ EXPECT_EQ(1020u, d.capacity());
+
+ // We need 1 more spot than the size due to the way the Ring works.
+ d.SetCapacity(1001);
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(d.front(), i);
+ d.pop_front();
+ }
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushFront) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ r.push_front(1);
+ r.push_front(2);
+ r.push_front(3);
+
+ EXPECT_EQ(3, r.front());
+ EXPECT_EQ(1, r.back());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushBack) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ r.push_back(1);
+ r.push_back(2);
+ r.push_back(3);
+
+ EXPECT_EQ(1, r.front());
+ EXPECT_EQ(3, r.back());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingCanPush) {
+ LazilyDeallocatedDeque<int>::Ring r1(4);
+ LazilyDeallocatedDeque<int>::Ring r2(4);
+
+ for (int i = 0; i < 3; i++) {
+ EXPECT_TRUE(r1.CanPush());
+ r1.push_back(0);
+
+ EXPECT_TRUE(r2.CanPush());
+ r2.push_back(0);
+ }
+
+ EXPECT_FALSE(r1.CanPush());
+ EXPECT_FALSE(r2.CanPush());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushPopPushPop) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ EXPECT_FALSE(r.CanPop());
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(1);
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(2);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(3);
+ EXPECT_FALSE(r.CanPush());
+
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(1, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(2, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(3, r.front());
+ r.pop_front();
+ EXPECT_FALSE(r.CanPop());
+
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(10);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(20);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(30);
+ EXPECT_FALSE(r.CanPush());
+
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(10, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(20, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(30, r.front());
+ r.pop_front();
+
+ EXPECT_FALSE(r.CanPop());
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/lazy_now.cc b/chromium/base/task/sequence_manager/lazy_now.cc
index a92144be83f..b391b32a4e9 100644
--- a/chromium/base/task/sequence_manager/lazy_now.cc
+++ b/chromium/base/task/sequence_manager/lazy_now.cc
@@ -16,7 +16,7 @@ LazyNow::LazyNow(const TickClock* tick_clock)
DCHECK(tick_clock);
}
-LazyNow::LazyNow(LazyNow&& move_from)
+LazyNow::LazyNow(LazyNow&& move_from) noexcept
: tick_clock_(move_from.tick_clock_), now_(move_from.now_) {
move_from.tick_clock_ = nullptr;
move_from.now_ = nullopt;
diff --git a/chromium/base/task/sequence_manager/moveable_auto_lock.h b/chromium/base/task/sequence_manager/moveable_auto_lock.h
new file mode 100644
index 00000000000..a80d5f8a74d
--- /dev/null
+++ b/chromium/base/task/sequence_manager/moveable_auto_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
+#define BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
+
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace sequence_manager {
+
+class MoveableAutoLock {
+ public:
+ explicit MoveableAutoLock(Lock& lock) : lock_(lock), moved_(false) {
+ lock_.Acquire();
+ }
+
+ MoveableAutoLock(MoveableAutoLock&& other) noexcept
+ : lock_(other.lock_), moved_(other.moved_) {
+ lock_.AssertAcquired();
+ other.moved_ = true;
+ }
+
+ ~MoveableAutoLock() {
+ if (moved_)
+ return;
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ Lock& lock_;
+ bool moved_;
+ DISALLOW_COPY_AND_ASSIGN(MoveableAutoLock);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
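A sketch, not part of the patch, of the pattern this class enables: handing
lock ownership from the scope that acquired it to another scope. The Counter
class is invented for illustration:

#include <utility>

#include "base/task/sequence_manager/moveable_auto_lock.h"

namespace base {
namespace sequence_manager {

class Counter {
 public:
  // Acquires |lock_| and transfers ownership to the caller.
  MoveableAutoLock Acquire() { return MoveableAutoLock(lock_); }

  // Takes over the lock; it is released when |auto_lock| is destroyed at the
  // end of this function, not at the end of Acquire().
  void IncrementWhileLocked(MoveableAutoLock lock) {
    MoveableAutoLock auto_lock(std::move(lock));
    ++value_;
  }

 private:
  Lock lock_;
  int value_ = 0;
};

// Usage: counter.IncrementWhileLocked(counter.Acquire());

}  // namespace sequence_manager
}  // namespace base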
diff --git a/chromium/base/task/sequence_manager/real_time_domain.cc b/chromium/base/task/sequence_manager/real_time_domain.cc
new file mode 100644
index 00000000000..6a6caf094d0
--- /dev/null
+++ b/chromium/base/task/sequence_manager/real_time_domain.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/real_time_domain.h"
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+RealTimeDomain::RealTimeDomain() {}
+
+RealTimeDomain::~RealTimeDomain() = default;
+
+LazyNow RealTimeDomain::CreateLazyNow() const {
+ return LazyNow(sequence_manager()->GetTickClock());
+}
+
+TimeTicks RealTimeDomain::Now() const {
+ return sequence_manager()->NowTicks();
+}
+
+Optional<TimeDelta> RealTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
+ Optional<TimeTicks> next_run_time = NextScheduledRunTime();
+ if (!next_run_time)
+ return nullopt;
+
+ TimeTicks now = lazy_now->Now();
+ if (now >= next_run_time) {
+ // Overdue work needs to be run immediately.
+ return TimeDelta();
+ }
+
+ TimeDelta delay = *next_run_time - now;
+ TRACE_EVENT1("sequence_manager", "RealTimeDomain::DelayTillNextTask",
+ "delay_ms", delay.InMillisecondsF());
+ return delay;
+}
+
+const char* RealTimeDomain::GetName() const {
+ return "RealTimeDomain";
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/real_time_domain.h b/chromium/base/task/sequence_manager/real_time_domain.h
new file mode 100644
index 00000000000..4923ebf06ea
--- /dev/null
+++ b/chromium/base/task/sequence_manager/real_time_domain.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
+#define BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/time_domain.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class BASE_EXPORT RealTimeDomain : public TimeDomain {
+ public:
+ RealTimeDomain();
+ ~RealTimeDomain() override;
+
+ // TimeDomain implementation:
+ LazyNow CreateLazyNow() const override;
+ TimeTicks Now() const override;
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
+
+ protected:
+ const char* GetName() const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RealTimeDomain);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
diff --git a/chromium/base/task/sequence_manager/sequence_manager.cc b/chromium/base/task/sequence_manager/sequence_manager.cc
new file mode 100644
index 00000000000..3451f98fe8d
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+namespace base {
+namespace sequence_manager {
+
+SequenceManager::MetricRecordingSettings::MetricRecordingSettings() {}
+
+SequenceManager::MetricRecordingSettings::MetricRecordingSettings(
+ bool cpu_time_for_each_task,
+ double task_thread_time_sampling_rate)
+ : records_cpu_time_for_each_task(base::ThreadTicks::IsSupported() &&
+ cpu_time_for_each_task),
+ task_sampling_rate_for_recording_cpu_time(
+ task_thread_time_sampling_rate) {
+ if (records_cpu_time_for_each_task)
+ task_sampling_rate_for_recording_cpu_time = 1;
+ if (!base::ThreadTicks::IsSupported())
+ task_sampling_rate_for_recording_cpu_time = 0;
+}
+
+} // namespace sequence_manager
+} // namespace base
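A worked sketch, not part of the patch, of how the constructor above
normalizes its inputs; the function is invented for illustration:

#include "base/logging.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {

void MetricRecordingSettingsExample() {
  using Settings = SequenceManager::MetricRecordingSettings;

  Settings sampled(/*cpu_time_for_each_task=*/false, 0.01);
  Settings per_task(/*cpu_time_for_each_task=*/true, 0.01);

  if (ThreadTicks::IsSupported()) {
    // Sampled recording keeps the requested rate as given.
    DCHECK(!sampled.records_cpu_time_for_each_task);
    DCHECK_EQ(0.01, sampled.task_sampling_rate_for_recording_cpu_time);

    // Per-task recording forces the rate to 1: every task is measured anyway.
    DCHECK(per_task.records_cpu_time_for_each_task);
    DCHECK_EQ(1.0, per_task.task_sampling_rate_for_recording_cpu_time);
  } else {
    // Without ThreadTicks both constructions collapse to {false, 0}.
    DCHECK_EQ(0.0, sampled.task_sampling_rate_for_recording_cpu_time);
    DCHECK_EQ(0.0, per_task.task_sampling_rate_for_recording_cpu_time);
  }
}

}  // namespace sequence_manager
}  // namespace base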
diff --git a/chromium/base/task/sequence_manager/sequence_manager.h b/chromium/base/task/sequence_manager/sequence_manager.h
new file mode 100644
index 00000000000..41e56ec03ad
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager.h
@@ -0,0 +1,132 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_time_observer.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TimeDomain;
+
+// SequenceManager manages TaskQueues which have different properties
+// (e.g. priority, common task type) multiplexing all posted tasks into
+// a single backing sequence (currently bound to a single thread, which is
+// referred to as the *main thread* in the comments below). A SequenceManager
+// implementation can be used in various ways to apply scheduling logic.
+class SequenceManager {
+ public:
+ class Observer {
+ public:
+ virtual ~Observer() = default;
+ // Called back on the main thread.
+ virtual void OnBeginNestedRunLoop() = 0;
+ virtual void OnExitNestedRunLoop() = 0;
+ };
+
+ struct MetricRecordingSettings {
+ MetricRecordingSettings();
+    // Note: These are the desired values; the MetricRecordingSettings
+    // constructor will adjust them for consistency (e.g. forcing values to
+    // false/zero when ThreadTicks is not supported).
+ MetricRecordingSettings(bool records_cpu_time_for_each_task,
+ double task_sampling_rate_for_recording_cpu_time);
+
+ // True if cpu time is measured for each task, so the integral
+ // metrics (as opposed to per-task metrics) can be recorded.
+ bool records_cpu_time_for_each_task = false;
+ // The proportion of the tasks for which the cpu time will be
+ // sampled or 0 if this is not enabled.
+ // This value is always 1 if the |records_cpu_time_for_each_task| is true.
+ double task_sampling_rate_for_recording_cpu_time = 0;
+ };
+
+ virtual ~SequenceManager() = default;
+
+ // TODO(kraynov): Bring back CreateOnCurrentThread static method here
+ // when the move is done. It's not here yet to reduce PLATFORM_EXPORT
+ // macros hacking during the move.
+
+ // Must be called on the main thread.
+ // Can be called only once, before creating TaskQueues.
+ // Observer must outlive the SequenceManager.
+ virtual void SetObserver(Observer* observer) = 0;
+
+ // Must be called on the main thread.
+ virtual void AddTaskObserver(MessageLoop::TaskObserver* task_observer) = 0;
+ virtual void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) = 0;
+ virtual void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+ virtual void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+
+ // Registers a TimeDomain with SequenceManager.
+ // TaskQueues must only be created with a registered TimeDomain.
+ // Conversely, any TimeDomain must remain registered until no
+ // TaskQueues (using that TimeDomain) remain.
+ virtual void RegisterTimeDomain(TimeDomain* time_domain) = 0;
+ virtual void UnregisterTimeDomain(TimeDomain* time_domain) = 0;
+
+ virtual TimeDomain* GetRealTimeDomain() const = 0;
+ virtual const TickClock* GetTickClock() const = 0;
+ virtual TimeTicks NowTicks() const = 0;
+
+ // Sets the SingleThreadTaskRunner that will be returned by
+ // ThreadTaskRunnerHandle::Get on the main thread.
+ virtual void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) = 0;
+
+ // Removes all canceled delayed tasks.
+ virtual void SweepCanceledDelayedTasks() = 0;
+
+ // Returns true if no tasks were executed in TaskQueues that monitor
+ // quiescence since the last call to this method.
+ virtual bool GetAndClearSystemIsQuiescentBit() = 0;
+
+  // Sets the number of tasks executed in a single SequenceManager invocation.
+  // Increasing this number reduces the overhead of the task dispatching
+  // logic at the cost of potentially worse latency. 1 by default.
+ virtual void SetWorkBatchSize(int work_batch_size) = 0;
+
+ // Enables crash keys that can be set in the scope of a task which help
+ // to identify the culprit if upcoming work results in a crash.
+ // Key names must be thread-specific to avoid races and corrupted crash dumps.
+ virtual void EnableCrashKeys(const char* file_name_crash_key,
+ const char* function_name_crash_key) = 0;
+
+ // Returns the metric recording configuration for the current SequenceManager.
+ virtual const MetricRecordingSettings& GetMetricRecordingSettings() const = 0;
+
+ // Creates a task queue with the given type, |spec| and args.
+ // Must be called on the main thread.
+ // TODO(scheduler-dev): SequenceManager should not create TaskQueues.
+ template <typename TaskQueueType, typename... Args>
+ scoped_refptr<TaskQueueType> CreateTaskQueue(const TaskQueue::Spec& spec,
+ Args&&... args) {
+ return WrapRefCounted(new TaskQueueType(CreateTaskQueueImpl(spec), spec,
+ std::forward<Args>(args)...));
+ }
+
+ protected:
+ virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+ const TaskQueue::Spec& spec) = 0;
+};
+
+// Create SequenceManager using MessageLoop on the current thread.
+// Implementation is located in sequence_manager_impl.cc.
+// TODO(scheduler-dev): Rename to TakeOverCurrentThread when we'll stop using
+// MessageLoop and will actually take over a thread.
+BASE_EXPORT std::unique_ptr<SequenceManager>
+CreateSequenceManagerOnCurrentThread();
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
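A setup sketch, not part of the patch. MyTaskQueue stands in for a concrete
TaskQueue subclass provided by the embedder, and the Spec constructor and
task_runner() accessor are assumed from the existing TaskQueue interface,
which lives outside this file:

#include <memory>

#include "base/task/sequence_manager/sequence_manager.h"

namespace base {
namespace sequence_manager {

void SetUpMainThreadScheduling() {
  // Takes over the MessageLoop already bound to the current thread.
  std::unique_ptr<SequenceManager> manager =
      CreateSequenceManagerOnCurrentThread();

  // Queues are created through the manager; MyTaskQueue is hypothetical.
  scoped_refptr<MyTaskQueue> default_queue =
      manager->CreateTaskQueue<MyTaskQueue>(TaskQueue::Spec("default_tq"));

  // Tasks posted via ThreadTaskRunnerHandle::Get() now land on
  // |default_queue|, and up to four tasks are run per work batch.
  manager->SetDefaultTaskRunner(default_queue->task_runner());
  manager->SetWorkBatchSize(4);
}

}  // namespace sequence_manager
}  // namespace base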
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.cc b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
new file mode 100644
index 00000000000..7afea9c3fc8
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
@@ -0,0 +1,724 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+
+#include <queue>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bit_cast.h"
+#include "base/compiler_specific.h"
+#include "base/debug/crash_logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_time_observer.h"
+#include "base/task/sequence_manager/thread_controller_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace sequence_manager {
+
+std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread() {
+ return internal::SequenceManagerImpl::CreateOnCurrentThread();
+}
+
+namespace internal {
+
+namespace {
+
+constexpr base::TimeDelta kLongTaskTraceEventThreshold =
+ base::TimeDelta::FromMilliseconds(50);
+// Proportion of tasks which will record thread time for metrics.
+const double kTaskSamplingRateForRecordingCPUTime = 0.01;
+// Proportion of SequenceManagers which will record thread time for each task,
+// enabling advanced metrics.
+const double kThreadSamplingRateForRecordingCPUTime = 0.0001;
+
+// Magic value to protect against memory corruption and bail out
+// early when detected.
+constexpr int kMemoryCorruptionSentinelValue = 0xdeadbeef;
+
+void SweepCanceledDelayedTasksInQueue(
+ internal::TaskQueueImpl* queue,
+ std::map<TimeDomain*, TimeTicks>* time_domain_now) {
+ TimeDomain* time_domain = queue->GetTimeDomain();
+ if (time_domain_now->find(time_domain) == time_domain_now->end())
+ time_domain_now->insert(std::make_pair(time_domain, time_domain->Now()));
+ queue->SweepCanceledDelayedTasks(time_domain_now->at(time_domain));
+}
+
+SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings() {
+ bool cpu_time_recording_always_on =
+ base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
+ return SequenceManager::MetricRecordingSettings(
+ cpu_time_recording_always_on, kTaskSamplingRateForRecordingCPUTime);
+}
+
+} // namespace
+
+SequenceManagerImpl::SequenceManagerImpl(
+ std::unique_ptr<internal::ThreadController> controller)
+ : graceful_shutdown_helper_(new internal::GracefulQueueShutdownHelper()),
+ controller_(std::move(controller)),
+ metric_recording_settings_(InitializeMetricRecordingSettings()),
+ memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
+ weak_factory_(this) {
+ // TODO(altimin): Create a sequence checker here.
+ DCHECK(controller_->RunsTasksInCurrentSequence());
+
+ TRACE_EVENT_WARMUP_CATEGORY("sequence_manager");
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("sequence_manager"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots"));
+
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
+ main_thread_only().selector.SetTaskQueueSelectorObserver(this);
+
+ RegisterTimeDomain(main_thread_only().real_time_domain.get());
+
+ controller_->SetSequencedTaskSource(this);
+ controller_->AddNestingObserver(this);
+}
+
+SequenceManagerImpl::~SequenceManagerImpl() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
+
+ // TODO(altimin): restore default task runner automatically when
+ // ThreadController is destroyed.
+ controller_->RestoreDefaultTaskRunner();
+
+ for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
+ main_thread_only().selector.RemoveQueue(queue);
+ queue->UnregisterTaskQueue();
+ }
+
+ main_thread_only().active_queues.clear();
+ main_thread_only().queues_to_gracefully_shutdown.clear();
+
+ graceful_shutdown_helper_->OnSequenceManagerDeleted();
+
+ main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);
+ controller_->RemoveNestingObserver(this);
+}
+
+SequenceManagerImpl::AnyThread::AnyThread() = default;
+
+SequenceManagerImpl::AnyThread::~AnyThread() = default;
+
+SequenceManagerImpl::MainThreadOnly::MainThreadOnly()
+ : random_generator(RandUint64()),
+ uniform_distribution(0.0, 1.0),
+ real_time_domain(new internal::RealTimeDomain()) {}
+
+SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
+
+// static
+std::unique_ptr<SequenceManagerImpl>
+SequenceManagerImpl::CreateOnCurrentThread() {
+ return WrapUnique(
+ new SequenceManagerImpl(internal::ThreadControllerImpl::Create(
+ MessageLoop::current(), DefaultTickClock::GetInstance())));
+}
+
+void SequenceManagerImpl::RegisterTimeDomain(TimeDomain* time_domain) {
+ main_thread_only().time_domains.insert(time_domain);
+ time_domain->OnRegisterWithSequenceManager(this);
+}
+
+void SequenceManagerImpl::UnregisterTimeDomain(TimeDomain* time_domain) {
+ main_thread_only().time_domains.erase(time_domain);
+}
+
+TimeDomain* SequenceManagerImpl::GetRealTimeDomain() const {
+ return main_thread_only().real_time_domain.get();
+}
+
+std::unique_ptr<internal::TaskQueueImpl>
+SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ TimeDomain* time_domain = spec.time_domain
+ ? spec.time_domain
+ : main_thread_only().real_time_domain.get();
+ DCHECK(main_thread_only().time_domains.find(time_domain) !=
+ main_thread_only().time_domains.end());
+ std::unique_ptr<internal::TaskQueueImpl> task_queue =
+ std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
+ main_thread_only().active_queues.insert(task_queue.get());
+ main_thread_only().selector.AddQueue(task_queue.get());
+ return task_queue;
+}
+
+void SequenceManagerImpl::SetObserver(Observer* observer) {
+ main_thread_only().observer = observer;
+}
+
+bool SequenceManagerImpl::AddToIncomingImmediateWorkList(
+ internal::TaskQueueImpl* task_queue,
+ internal::EnqueueOrder enqueue_order) {
+ AutoLock lock(any_thread_lock_);
+ // Check if |task_queue| is already in the linked list.
+ if (task_queue->immediate_work_list_storage()->queue)
+ return false;
+
+ // Insert into the linked list.
+ task_queue->immediate_work_list_storage()->queue = task_queue;
+ task_queue->immediate_work_list_storage()->order = enqueue_order;
+ task_queue->immediate_work_list_storage()->next =
+ any_thread().incoming_immediate_work_list;
+ any_thread().incoming_immediate_work_list =
+ task_queue->immediate_work_list_storage();
+ return true;
+}
+
+void SequenceManagerImpl::RemoveFromIncomingImmediateWorkList(
+ internal::TaskQueueImpl* task_queue) {
+ AutoLock lock(any_thread_lock_);
+ internal::IncomingImmediateWorkList** prev =
+ &any_thread().incoming_immediate_work_list;
+ while (*prev) {
+ if ((*prev)->queue == task_queue) {
+ *prev = (*prev)->next;
+ break;
+ }
+ prev = &(*prev)->next;
+ }
+
+ task_queue->immediate_work_list_storage()->next = nullptr;
+ task_queue->immediate_work_list_storage()->queue = nullptr;
+}
+
+void SequenceManagerImpl::UnregisterTaskQueueImpl(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+ TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
+ "queue_name", task_queue->GetName());
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+ main_thread_only().selector.RemoveQueue(task_queue.get());
+
+  // After UnregisterTaskQueue returns, no new tasks can be posted.
+  // It's important to call it first to avoid a race condition between removing
+  // the task queue from various lists here and adding it to the same lists
+  // when posting a task.
+ task_queue->UnregisterTaskQueue();
+
+ // Remove |task_queue| from the linked list if present.
+ // This is O(n). We assume this will be a relatively infrequent operation.
+ RemoveFromIncomingImmediateWorkList(task_queue.get());
+
+  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can
+  // prevent it from being freed while any of our structures holds a raw
+  // pointer to it.
+ main_thread_only().active_queues.erase(task_queue.get());
+ main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
+}
+
+void SequenceManagerImpl::ReloadEmptyWorkQueues() {
+  // There are two cases where a queue needs reloading. First, it might be
+  // completely empty and we've just posted a task (this method handles that
+  // case). Second, the work queue might become empty while calling
+  // WorkQueue::TakeTaskFromWorkQueue (handled there).
+ for (internal::TaskQueueImpl* queue : main_thread_only().queues_to_reload) {
+ queue->ReloadImmediateWorkQueueIfEmpty();
+ }
+}
+
+void SequenceManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManagerImpl::WakeUpReadyDelayedQueues");
+
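+  // The real time domain can reuse the caller's |lazy_now|; other (e.g.
+  // virtual) time domains compute "now" from their own clock via
+  // CreateLazyNow().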
+ for (TimeDomain* time_domain : main_thread_only().time_domains) {
+ if (time_domain == main_thread_only().real_time_domain.get()) {
+ time_domain->WakeUpReadyDelayedQueues(lazy_now);
+ } else {
+ LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
+ time_domain->WakeUpReadyDelayedQueues(&time_domain_lazy_now);
+ }
+ }
+}
+
+void SequenceManagerImpl::OnBeginNestedRunLoop() {
+ main_thread_only().nesting_depth++;
+ if (main_thread_only().observer)
+ main_thread_only().observer->OnBeginNestedRunLoop();
+}
+
+void SequenceManagerImpl::OnExitNestedRunLoop() {
+ main_thread_only().nesting_depth--;
+ DCHECK_GE(main_thread_only().nesting_depth, 0);
+ if (main_thread_only().nesting_depth == 0) {
+    // While we were nested, some non-nestable tasks may have been deferred.
+    // We push them back onto the *front* of their original work queues,
+    // which is why we iterate |non_nestable_task_queue| in FIFO order.
+ while (!main_thread_only().non_nestable_task_queue.empty()) {
+ internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
+ main_thread_only().non_nestable_task_queue.back();
+ non_nestable_task.task_queue->RequeueDeferredNonNestableTask(
+ std::move(non_nestable_task));
+ main_thread_only().non_nestable_task_queue.pop_back();
+ }
+ }
+ if (main_thread_only().observer)
+ main_thread_only().observer->OnExitNestedRunLoop();
+}
+
+void SequenceManagerImpl::OnQueueHasIncomingImmediateWork(
+ internal::TaskQueueImpl* queue,
+ internal::EnqueueOrder enqueue_order,
+ bool queue_is_blocked) {
+ if (AddToIncomingImmediateWorkList(queue, enqueue_order) && !queue_is_blocked)
+ controller_->ScheduleWork();
+}
+
+void SequenceManagerImpl::MaybeScheduleImmediateWork(
+ const Location& from_here) {
+ controller_->ScheduleWork();
+}
+
+void SequenceManagerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
+ TimeTicks run_time) {
+ controller_->SetNextDelayedDoWork(lazy_now, run_time);
+}
+
+Optional<PendingTask> SequenceManagerImpl::TakeTask() {
+ CHECK(Validate());
+
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::TakeTask");
+
+ {
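+    // Drain |incoming_immediate_work_list| into the main-thread scratch
+    // vector; the queues are reloaded below, after the lock is released.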
+ AutoLock lock(any_thread_lock_);
+ main_thread_only().queues_to_reload.clear();
+
+ for (internal::IncomingImmediateWorkList* iter =
+ any_thread().incoming_immediate_work_list;
+ iter; iter = iter->next) {
+ main_thread_only().queues_to_reload.push_back(iter->queue);
+ iter->queue = nullptr;
+ }
+
+ any_thread().incoming_immediate_work_list = nullptr;
+ }
+
+  // It's important we call ReloadEmptyWorkQueues outside of the lock to
+  // avoid a lock order inversion.
+ ReloadEmptyWorkQueues();
+ LazyNow lazy_now(controller_->GetClock());
+ WakeUpReadyDelayedQueues(&lazy_now);
+
+ while (true) {
+ internal::WorkQueue* work_queue = nullptr;
+ bool should_run =
+ main_thread_only().selector.SelectWorkQueueToService(&work_queue);
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
+ this, AsValueWithSelectorResult(should_run, work_queue));
+
+ if (!should_run)
+ return nullopt;
+
+ // If the head task was canceled, remove it and run the selector again.
+ if (work_queue->RemoveAllCanceledTasksFromFront())
+ continue;
+
+ if (work_queue->GetFrontTask()->nestable == Nestable::kNonNestable &&
+ main_thread_only().nesting_depth > 0) {
+      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed,
+      // so the additional delay should not be a problem.
+      // Note that because we don't delete queues while nested, it's perfectly
+      // OK to store the raw pointer for |queue| here.
+ internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
+ work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
+ work_queue->queue_type()};
+ main_thread_only().non_nestable_task_queue.push_back(
+ std::move(deferred_task));
+ continue;
+ }
+
+ main_thread_only().task_execution_stack.emplace_back(
+ work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
+ InitializeTaskTiming(work_queue->task_queue()));
+
+ UMA_HISTOGRAM_COUNTS_1000("TaskQueueManager.ActiveQueuesCount",
+ main_thread_only().active_queues.size());
+
+ ExecutingTask& executing_task =
+ *main_thread_only().task_execution_stack.rbegin();
+ NotifyWillProcessTask(&executing_task, &lazy_now);
+ return std::move(executing_task.pending_task);
+ }
+}
+
+void SequenceManagerImpl::DidRunTask() {
+ LazyNow lazy_now(controller_->GetClock());
+ ExecutingTask& executing_task =
+ *main_thread_only().task_execution_stack.rbegin();
+ NotifyDidProcessTask(&executing_task, &lazy_now);
+ main_thread_only().task_execution_stack.pop_back();
+
+ if (main_thread_only().nesting_depth == 0)
+ CleanUpQueues();
+}
+
+TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+ // If the selector has non-empty queues we trivially know there is immediate
+ // work to be done.
+ if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
+ return TimeDelta();
+
+  // It's possible the selector's state is dirty because ReloadEmptyWorkQueues
+  // hasn't been called yet. This check catches the case of fresh incoming work.
+ {
+ AutoLock lock(any_thread_lock_);
+ for (const internal::IncomingImmediateWorkList* iter =
+ any_thread().incoming_immediate_work_list;
+ iter; iter = iter->next) {
+ if (iter->queue->CouldTaskRun(iter->order))
+ return TimeDelta();
+ }
+ }
+
+  // Otherwise we need to find the shortest delay, if any. NB we don't need to
+  // call WakeUpReadyDelayedQueues because it's assumed DelayTillNextTask will
+  // return TimeDelta() if the delayed task is due to run now.
+ TimeDelta delay_till_next_task = TimeDelta::Max();
+ for (TimeDomain* time_domain : main_thread_only().time_domains) {
+ Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
+ if (!delay)
+ continue;
+
+ if (*delay < delay_till_next_task)
+ delay_till_next_task = *delay;
+ }
+ return delay_till_next_task;
+}
+
+void SequenceManagerImpl::WillQueueTask(
+ internal::TaskQueueImpl::Task* pending_task) {
+ controller_->WillQueueTask(pending_task);
+}
+
+TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
+ internal::TaskQueueImpl* task_queue) {
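+  // Wall time is recorded if any TaskTimeObserver might care or if the queue
+  // explicitly requires task timing; thread (CPU) time additionally requires
+  // the sampling check in ShouldRecordCPUTimeForTask() to pass.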
+ bool records_wall_time =
+ (task_queue->GetShouldNotifyObservers() &&
+ main_thread_only().task_time_observers.might_have_observers()) ||
+ task_queue->RequiresTaskTiming();
+ bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
+ return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
+}
+
+void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
+ LazyNow* time_before_task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManagerImpl::NotifyWillProcessTaskObservers");
+ if (executing_task->task_queue->GetQuiescenceMonitored())
+ main_thread_only().task_was_run_on_quiescence_monitored_queue = true;
+
+#if !defined(OS_NACL)
+ debug::SetCrashKeyString(
+ main_thread_only().file_name_crash_key,
+ executing_task->pending_task.posted_from.file_name());
+ debug::SetCrashKeyString(
+ main_thread_only().function_name_crash_key,
+ executing_task->pending_task.posted_from.function_name());
+#endif // OS_NACL
+
+ executing_task->task_timing.RecordTaskStart(time_before_task);
+
+ if (!executing_task->task_queue->GetShouldNotifyObservers())
+ return;
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.WillProcessTaskObservers");
+ for (auto& observer : main_thread_only().task_observers)
+ observer.WillProcessTask(executing_task->pending_task);
+ }
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.QueueNotifyWillProcessTask");
+ executing_task->task_queue->NotifyWillProcessTask(
+ executing_task->pending_task);
+ }
+
+ bool notify_time_observers =
+ main_thread_only().task_time_observers.might_have_observers() ||
+ executing_task->task_queue->RequiresTaskTiming();
+
+ if (!notify_time_observers)
+ return;
+
+ if (main_thread_only().nesting_depth == 0) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.WillProcessTaskTimeObservers");
+ for (auto& observer : main_thread_only().task_time_observers)
+ observer.WillProcessTask(executing_task->task_timing.start_time());
+ }
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.QueueOnTaskStarted");
+ executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
+ executing_task->task_timing);
+ }
+}
+
+void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
+ LazyNow* time_after_task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManagerImpl::NotifyDidProcessTaskObservers");
+
+ executing_task->task_timing.RecordTaskEnd(time_after_task);
+
+ const TaskQueue::TaskTiming& task_timing = executing_task->task_timing;
+
+ if (!executing_task->task_queue->GetShouldNotifyObservers())
+ return;
+
+ if (task_timing.has_wall_time() && main_thread_only().nesting_depth == 0) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.DidProcessTaskTimeObservers");
+ for (auto& observer : main_thread_only().task_time_observers) {
+ observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
+ }
+ }
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.DidProcessTaskObservers");
+ for (auto& observer : main_thread_only().task_observers)
+ observer.DidProcessTask(executing_task->pending_task);
+ }
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.QueueNotifyDidProcessTask");
+ executing_task->task_queue->NotifyDidProcessTask(
+ executing_task->pending_task);
+ }
+
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.QueueOnTaskCompleted");
+ if (task_timing.has_wall_time())
+ executing_task->task_queue->OnTaskCompleted(executing_task->pending_task,
+ task_timing);
+ }
+
+ // TODO(altimin): Move this back to blink.
+ if (task_timing.has_wall_time() &&
+ task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
+ main_thread_only().nesting_depth == 0) {
+ TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
+ "duration", task_timing.wall_duration().InSecondsF());
+ }
+}
+
+void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ DCHECK_GE(work_batch_size, 1);
+ controller_->SetWorkBatchSize(work_batch_size);
+}
+
+void SequenceManagerImpl::AddTaskObserver(
+ MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+void SequenceManagerImpl::RemoveTaskObserver(
+ MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+void SequenceManagerImpl::AddTaskTimeObserver(
+ TaskTimeObserver* task_time_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ main_thread_only().task_time_observers.AddObserver(task_time_observer);
+}
+
+void SequenceManagerImpl::RemoveTaskTimeObserver(
+ TaskTimeObserver* task_time_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
+}
+
+bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
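+  // The system is considered quiescent iff no task ran on a
+  // quiescence-monitored queue since the last call; reading the bit also
+  // clears it.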
+ bool task_was_run =
+ main_thread_only().task_was_run_on_quiescence_monitored_queue;
+ main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
+ return !task_was_run;
+}
+
+internal::EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
+ return enqueue_order_generator_.GenerateNext();
+}
+
+std::unique_ptr<trace_event::ConvertableToTraceFormat>
+SequenceManagerImpl::AsValueWithSelectorResult(
+ bool should_run,
+ internal::WorkQueue* selected_work_queue) const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ std::unique_ptr<trace_event::TracedValue> state(
+ new trace_event::TracedValue());
+ TimeTicks now = NowTicks();
+ state->BeginArray("active_queues");
+ for (auto* const queue : main_thread_only().active_queues)
+ queue->AsValueInto(now, state.get());
+ state->EndArray();
+ state->BeginArray("queues_to_gracefully_shutdown");
+ for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+ pair.first->AsValueInto(now, state.get());
+ state->EndArray();
+ state->BeginArray("queues_to_delete");
+ for (const auto& pair : main_thread_only().queues_to_delete)
+ pair.first->AsValueInto(now, state.get());
+ state->EndArray();
+ state->BeginDictionary("selector");
+ main_thread_only().selector.AsValueInto(state.get());
+ state->EndDictionary();
+ if (should_run) {
+ state->SetString("selected_queue",
+ selected_work_queue->task_queue()->GetName());
+ state->SetString("work_queue_name", selected_work_queue->name());
+ }
+
+ state->BeginArray("time_domains");
+ for (auto* time_domain : main_thread_only().time_domains)
+ time_domain->AsValueInto(state.get());
+ state->EndArray();
+ {
+ AutoLock lock(any_thread_lock_);
+ state->BeginArray("has_incoming_immediate_work");
+ for (const internal::IncomingImmediateWorkList* iter =
+ any_thread().incoming_immediate_work_list;
+ iter; iter = iter->next) {
+ state->AppendString(iter->queue->GetName());
+ }
+ state->EndArray();
+ }
+ return std::move(state);
+}
+
+void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ DCHECK(queue->IsQueueEnabled());
+ // Only schedule DoWork if there's something to do.
+ if (queue->HasTaskToRunImmediately() && !queue->BlockedByFence())
+ MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+void SequenceManagerImpl::SweepCanceledDelayedTasks() {
+ std::map<TimeDomain*, TimeTicks> time_domain_now;
+ for (auto* const queue : main_thread_only().active_queues)
+ SweepCanceledDelayedTasksInQueue(queue, &time_domain_now);
+ for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+ SweepCanceledDelayedTasksInQueue(pair.first, &time_domain_now);
+}
+
+void SequenceManagerImpl::TakeQueuesToGracefullyShutdownFromHelper() {
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues =
+ graceful_shutdown_helper_->TakeQueues();
+ for (std::unique_ptr<internal::TaskQueueImpl>& queue : queues) {
+ main_thread_only().queues_to_gracefully_shutdown[queue.get()] =
+ std::move(queue);
+ }
+}
+
+void SequenceManagerImpl::CleanUpQueues() {
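+  // Collect queues handed over for graceful shutdown, unregister the ones
+  // that have drained, then drop the queues pending deletion.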
+ TakeQueuesToGracefullyShutdownFromHelper();
+
+ for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
+ it != main_thread_only().queues_to_gracefully_shutdown.end();) {
+ if (it->first->IsEmpty()) {
+ UnregisterTaskQueueImpl(std::move(it->second));
+ main_thread_only().active_queues.erase(it->first);
+ main_thread_only().queues_to_gracefully_shutdown.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ main_thread_only().queues_to_delete.clear();
+}
+
+scoped_refptr<internal::GracefulQueueShutdownHelper>
+SequenceManagerImpl::GetGracefulQueueShutdownHelper() const {
+ return graceful_shutdown_helper_;
+}
+
+WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
+ return weak_factory_.GetWeakPtr();
+}
+
+void SequenceManagerImpl::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ controller_->SetDefaultTaskRunner(task_runner);
+}
+
+const TickClock* SequenceManagerImpl::GetTickClock() const {
+ return controller_->GetClock();
+}
+
+TimeTicks SequenceManagerImpl::NowTicks() const {
+ return controller_->GetClock()->NowTicks();
+}
+
+bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
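+  // CPU (thread) time is sampled: draw from the uniform [0, 1) distribution
+  // and record only when the draw falls below the configured sampling rate,
+  // provided ThreadTicks is supported on this platform.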
+ return ThreadTicks::IsSupported() &&
+ main_thread_only().uniform_distribution(
+ main_thread_only().random_generator) <
+ metric_recording_settings_
+ .task_sampling_rate_for_recording_cpu_time;
+}
+
+const SequenceManager::MetricRecordingSettings&
+SequenceManagerImpl::GetMetricRecordingSettings() const {
+ return metric_recording_settings_;
+}
+
+MSVC_DISABLE_OPTIMIZE()
+bool SequenceManagerImpl::Validate() {
+ return memory_corruption_sentinel_ == kMemoryCorruptionSentinelValue;
+}
+MSVC_ENABLE_OPTIMIZE()
+
+void SequenceManagerImpl::EnableCrashKeys(
+ const char* file_name_crash_key_name,
+ const char* function_name_crash_key_name) {
+ DCHECK(!main_thread_only().file_name_crash_key);
+ DCHECK(!main_thread_only().function_name_crash_key);
+#if !defined(OS_NACL)
+ main_thread_only().file_name_crash_key = debug::AllocateCrashKeyString(
+ file_name_crash_key_name, debug::CrashKeySize::Size64);
+ main_thread_only().function_name_crash_key = debug::AllocateCrashKeyString(
+ function_name_crash_key_name, debug::CrashKeySize::Size64);
+#endif // OS_NACL
+}
+
+internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
+ const {
+ if (main_thread_only().task_execution_stack.empty())
+ return nullptr;
+ return main_thread_only().task_execution_stack.rbegin()->task_queue;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.h b/chromium/base/task/sequence_manager/sequence_manager_impl.h
new file mode 100644
index 00000000000..b42dc727981
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.h
@@ -0,0 +1,341 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <random>
+#include <set>
+#include <unordered_map>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/cancelable_callback.h"
+#include "base/containers/circular_deque.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/graceful_queue_shutdown_helper.h"
+#include "base/task/sequence_manager/moveable_auto_lock.h"
+#include "base/task/sequence_manager/sequence_manager.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_queue_selector.h"
+#include "base/task/sequence_manager/thread_controller.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+namespace debug {
+struct CrashKeyString;
+} // namespace debug
+
+namespace trace_event {
+class ConvertableToTraceFormat;
+} // namespace trace_event
+
+namespace sequence_manager {
+
+class SequenceManagerForTest;
+class TaskQueue;
+class TaskTimeObserver;
+class TimeDomain;
+
+namespace internal {
+
+class RealTimeDomain;
+class TaskQueueImpl;
+
+// The task queue manager provides N task queues and a selector interface for
+// choosing which task queue to service next. Each task queue consists of two
+// sub queues:
+//
+// 1. Incoming task queue. Tasks that are posted get immediately appended here.
+// When a task is appended into an empty incoming queue, the task manager
+// work function (DoWork()) is scheduled to run on the main task runner.
+//
+// 2. Work queue. If a work queue is empty when DoWork() is entered, tasks from
+// the incoming task queue (if any) are moved here. The work queues are
+// registered with the selector as input to the scheduling decision.
+//
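+// A rough usage sketch (illustrative only: the templated CreateTaskQueue<>
+// helper lives on the SequenceManager base class, TestTaskQueue is a
+// test-support type from the unit tests added in this patch, and SomeTask is
+// a placeholder closure):
+//
+//   std::unique_ptr<SequenceManagerImpl> manager =
+//       SequenceManagerImpl::CreateOnCurrentThread();
+//   scoped_refptr<TestTaskQueue> queue =
+//       manager->CreateTaskQueue<TestTaskQueue>(TaskQueue::Spec("example"));
+//   queue->PostTask(FROM_HERE, BindOnce(&SomeTask));
+//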
+class BASE_EXPORT SequenceManagerImpl
+ : public SequenceManager,
+ public internal::SequencedTaskSource,
+ public internal::TaskQueueSelector::Observer,
+ public RunLoop::NestingObserver {
+ public:
+ using Observer = SequenceManager::Observer;
+
+ ~SequenceManagerImpl() override;
+
+ // Assume direct control over current thread and create a SequenceManager.
+ // This function should be called only once per thread.
+ // This function assumes that a MessageLoop is initialized for
+ // the current thread.
+ static std::unique_ptr<SequenceManagerImpl> CreateOnCurrentThread();
+
+ // SequenceManager implementation:
+ void SetObserver(Observer* observer) override;
+ void AddTaskObserver(MessageLoop::TaskObserver* task_observer) override;
+ void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) override;
+ void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+ void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+ void RegisterTimeDomain(TimeDomain* time_domain) override;
+ void UnregisterTimeDomain(TimeDomain* time_domain) override;
+ TimeDomain* GetRealTimeDomain() const override;
+ const TickClock* GetTickClock() const override;
+ TimeTicks NowTicks() const override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) override;
+ void SweepCanceledDelayedTasks() override;
+ bool GetAndClearSystemIsQuiescentBit() override;
+ void SetWorkBatchSize(int work_batch_size) override;
+ void EnableCrashKeys(const char* file_name_crash_key,
+ const char* function_name_crash_key) override;
+ const MetricRecordingSettings& GetMetricRecordingSettings() const override;
+
+ // Implementation of SequencedTaskSource:
+ Optional<PendingTask> TakeTask() override;
+ void DidRunTask() override;
+ TimeDelta DelayTillNextTask(LazyNow* lazy_now) override;
+
+  // Requests that a task to process work is posted on the main task runner.
+  // These tasks are de-duplicated in two buckets: main-thread and all other
+  // threads. This distinction is done to reduce the overhead from locks, as we
+  // assume the main-thread path will be hot.
+ void MaybeScheduleImmediateWork(const Location& from_here);
+
+ // Requests that a delayed task to process work is posted on the main task
+ // runner. These delayed tasks are de-duplicated. Must be called on the thread
+ // this class was created on.
+
+ // Schedules next wake-up at the given time, cancels any previous requests.
+ // Use TimeTicks::Max() to cancel a wake-up.
+ // Must be called from a TimeDomain only.
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
+
+ // Returns the currently executing TaskQueue if any. Must be called on the
+ // thread this class was created on.
+ internal::TaskQueueImpl* currently_executing_task_queue() const;
+
+  // Unregisters a TaskQueue previously created by |CreateTaskQueue()|.
+  // No tasks will run on this queue after this call.
+ void UnregisterTaskQueueImpl(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue);
+
+ scoped_refptr<internal::GracefulQueueShutdownHelper>
+ GetGracefulQueueShutdownHelper() const;
+
+ WeakPtr<SequenceManagerImpl> GetWeakPtr();
+
+ protected:
+ // Create a task queue manager where |controller| controls the thread
+ // on which the tasks are eventually run.
+ explicit SequenceManagerImpl(
+ std::unique_ptr<internal::ThreadController> controller);
+
+ friend class internal::TaskQueueImpl;
+ friend class ::base::sequence_manager::SequenceManagerForTest;
+
+ private:
+ enum class ProcessTaskResult {
+ kDeferred,
+ kExecuted,
+ kSequenceManagerDeleted,
+ };
+
+ struct AnyThread {
+ AnyThread();
+ ~AnyThread();
+
+ // Task queues with newly available work on the incoming queue.
+ internal::IncomingImmediateWorkList* incoming_immediate_work_list = nullptr;
+ };
+
+  // SequenceManager maintains a queue of non-nestable tasks since they're
+  // uncommon and allocating an extra deque per TaskQueue would waste memory.
+ using NonNestableTaskDeque =
+ circular_deque<internal::TaskQueueImpl::DeferredNonNestableTask>;
+
+  // We have to track reentrancy because we support nested run loops but the
+  // selector interface is unaware of those. This struct keeps track of all
+  // task-related state needed to make pairs of TakeTask() / DidRunTask() work.
+ struct ExecutingTask {
+ ExecutingTask(internal::TaskQueueImpl::Task&& pending_task,
+ internal::TaskQueueImpl* task_queue,
+ TaskQueue::TaskTiming task_timing)
+ : pending_task(std::move(pending_task)),
+ task_queue(task_queue),
+ task_timing(task_timing) {}
+
+ internal::TaskQueueImpl::Task pending_task;
+ internal::TaskQueueImpl* task_queue = nullptr;
+ TaskQueue::TaskTiming task_timing;
+ };
+
+ struct MainThreadOnly {
+ MainThreadOnly();
+ ~MainThreadOnly();
+
+ int nesting_depth = 0;
+ NonNestableTaskDeque non_nestable_task_queue;
+ // TODO(altimin): Switch to instruction pointer crash key when it's
+ // available.
+ debug::CrashKeyString* file_name_crash_key = nullptr;
+ debug::CrashKeyString* function_name_crash_key = nullptr;
+
+ std::mt19937_64 random_generator;
+ std::uniform_real_distribution<double> uniform_distribution;
+
+ internal::TaskQueueSelector selector;
+ ObserverList<MessageLoop::TaskObserver> task_observers;
+ ObserverList<TaskTimeObserver> task_time_observers;
+ std::set<TimeDomain*> time_domains;
+ std::unique_ptr<internal::RealTimeDomain> real_time_domain;
+
+    // List of task queues managed by this SequenceManager.
+    // - active_queues contains queues that are still running tasks.
+    //   Most often they are owned by relevant TaskQueues, but
+    //   queues_to_gracefully_shutdown are included here too.
+    // - queues_to_gracefully_shutdown contains queues which should be deleted
+    //   when they become empty.
+    // - queues_to_delete contains soon-to-be-deleted queues, because some
+    //   internal scheduling code does not expect queues to be pulled out
+    //   from underneath it.
+
+ std::set<internal::TaskQueueImpl*> active_queues;
+ std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+ queues_to_gracefully_shutdown;
+ std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+ queues_to_delete;
+
+ // Scratch space used to store the contents of
+ // any_thread().incoming_immediate_work_list for use by
+ // ReloadEmptyWorkQueues. We keep hold of this vector to avoid unnecessary
+ // memory allocations.
+ std::vector<internal::TaskQueueImpl*> queues_to_reload;
+
+ bool task_was_run_on_quiescence_monitored_queue = false;
+
+ // Due to nested runloops more than one task can be executing concurrently.
+ std::list<ExecutingTask> task_execution_stack;
+
+ Observer* observer = nullptr; // NOT OWNED
+ };
+
+ // TaskQueueSelector::Observer:
+ void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
+
+ // RunLoop::NestingObserver:
+ void OnBeginNestedRunLoop() override;
+ void OnExitNestedRunLoop() override;
+
+ // Called by the task queue to inform this SequenceManager of a task that's
+ // about to be queued. This SequenceManager may use this opportunity to add
+ // metadata to |pending_task| before it is moved into the queue.
+ void WillQueueTask(internal::TaskQueueImpl::Task* pending_task);
+
+  // Delayed tasks with run_time <= Now() are enqueued onto the work queue,
+  // and any empty work queues are reloaded.
+ void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+ void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
+ void NotifyDidProcessTask(ExecutingTask* task, LazyNow* time_after_task);
+
+ internal::EnqueueOrder GetNextSequenceNumber();
+
+ std::unique_ptr<trace_event::ConvertableToTraceFormat>
+ AsValueWithSelectorResult(bool should_run,
+ internal::WorkQueue* selected_work_queue) const;
+
+  // Adds |queue| to |any_thread().incoming_immediate_work_list| and, if
+  // |queue_is_blocked| is false, makes sure a DoWork is posted.
+ // Can be called from any thread.
+ void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue,
+ internal::EnqueueOrder enqueue_order,
+ bool queue_is_blocked);
+
+ // Returns true if |task_queue| was added to the list, or false if it was
+ // already in the list. If |task_queue| was inserted, the |order| is set
+ // with |enqueue_order|.
+ bool AddToIncomingImmediateWorkList(internal::TaskQueueImpl* task_queue,
+ internal::EnqueueOrder enqueue_order);
+ void RemoveFromIncomingImmediateWorkList(internal::TaskQueueImpl* task_queue);
+
+ // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in
+ // |main_thread_only().queues_to_reload|.
+ void ReloadEmptyWorkQueues();
+
+ std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+ const TaskQueue::Spec& spec) override;
+
+ void TakeQueuesToGracefullyShutdownFromHelper();
+
+ // Deletes queues marked for deletion and empty queues marked for shutdown.
+ void CleanUpQueues();
+
+ bool ShouldRecordCPUTimeForTask();
+
+ // Determines if wall time or thread time should be recorded for the next
+ // task.
+ TaskQueue::TaskTiming InitializeTaskTiming(
+ internal::TaskQueueImpl* task_queue);
+
+ const scoped_refptr<internal::GracefulQueueShutdownHelper>
+ graceful_shutdown_helper_;
+
+ internal::EnqueueOrder::Generator enqueue_order_generator_;
+
+ std::unique_ptr<internal::ThreadController> controller_;
+
+ mutable Lock any_thread_lock_;
+ AnyThread any_thread_;
+
+ struct AnyThread& any_thread() {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+ const struct AnyThread& any_thread() const {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+
+ const MetricRecordingSettings metric_recording_settings_;
+
+ // A check to bail out early during memory corruption.
+ // https://crbug.com/757940
+ bool Validate();
+
+ int32_t memory_corruption_sentinel_;
+
+ THREAD_CHECKER(main_thread_checker_);
+ MainThreadOnly main_thread_only_;
+ MainThreadOnly& main_thread_only() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+ const MainThreadOnly& main_thread_only() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+
+ WeakPtrFactory<SequenceManagerImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceManagerImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
new file mode 100644
index 00000000000..6550f825042
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
@@ -0,0 +1,3285 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
+#include "base/location.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/optional.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_queue_selector.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/test/sequence_manager_for_test.h"
+#include "base/task/sequence_manager/test/test_task_queue.h"
+#include "base/task/sequence_manager/test/test_task_time_observer.h"
+#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/trace_event_analyzer.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/blame_context.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyNumber;
+using testing::Contains;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Mock;
+using testing::Not;
+using testing::_;
+using base::sequence_manager::internal::EnqueueOrder;
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+// To avoid symbol collisions in jumbo builds.
+namespace sequence_manager_impl_unittest {
+
+enum class TestType : int {
+ kCustom = 0,
+ kUseMockTaskRunner = 1,
+ kUseMessageLoop = 2,
+ kUseMessagePump = 3,
+};
+
+class SequenceManagerTestBase : public testing::TestWithParam<TestType> {
+ protected:
+ void TearDown() override {
+ // SequenceManager should be deleted before an underlying task runner.
+ manager_.reset();
+ }
+
+ scoped_refptr<TestTaskQueue> CreateTaskQueue(
+ TaskQueue::Spec spec = TaskQueue::Spec("test")) {
+ return manager_->CreateTaskQueue<TestTaskQueue>(spec);
+ }
+
+ void CreateTaskQueues(size_t num_queues) {
+ for (size_t i = 0; i < num_queues; i++)
+ runners_.push_back(CreateTaskQueue());
+ }
+
+ std::unique_ptr<SequenceManagerForTest> manager_;
+ std::vector<scoped_refptr<TestTaskQueue>> runners_;
+ TimeTicks start_time_;
+ TestTaskTimeObserver test_task_time_observer_;
+};
+
+// Here SequenceManagerImpl is driven by a TestMockTimeTaskRunner, which
+// controls both task execution and the mock clock.
+// TODO(kraynov): Make this class support all TestTypes.
+// That would allow us to re-run tests in various environments before we
+// eventually move to MessagePump and remove the current ThreadControllerImpl.
+class SequenceManagerTest : public SequenceManagerTestBase {
+ public:
+ void DeleteSequenceManagerTask() { manager_.reset(); }
+
+ protected:
+ void SetUp() override {
+ ASSERT_EQ(GetParam(), TestType::kUseMockTaskRunner);
+ test_task_runner_ = WrapRefCounted(new TestMockTimeTaskRunner(
+ TestMockTimeTaskRunner::Type::kBoundToThread));
+ // A null clock triggers some assertions.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(1));
+ start_time_ = GetTickClock()->NowTicks();
+
+ manager_ =
+ SequenceManagerForTest::Create(nullptr, ThreadTaskRunnerHandle::Get(),
+ test_task_runner_->GetMockTickClock());
+ }
+
+ const TickClock* GetTickClock() {
+ return test_task_runner_->GetMockTickClock();
+ }
+
+ void RunPendingTasks() {
+ // We should only run tasks already posted by that moment.
+ RunLoop run_loop;
+ test_task_runner_->PostTask(FROM_HERE, run_loop.QuitClosure());
+ // TestMockTimeTaskRunner will fast-forward mock clock if necessary.
+ run_loop.Run();
+ }
+
+  // Runs all immediate tasks until there is no more work to do and advances
+  // time if there is a pending delayed task. |per_run_time_callback| is called
+  // when the clock advances.
+  // The only difference from FastForwardUntilNoTasksRemain is that time
+  // advancement isn't driven by the test task runner but by the time domain's
+  // next scheduled run time. This lets us double-check consistency and count
+  // such bursts of work, which is what these tests measure.
+ void RunUntilManagerIsIdle(RepeatingClosure per_run_time_callback) {
+ for (;;) {
+ // Advance time if we've run out of immediate work to do.
+ if (!manager_->HasImmediateWork()) {
+ LazyNow lazy_now(GetTickClock());
+ Optional<TimeDelta> delay =
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now);
+ if (delay) {
+ test_task_runner_->AdvanceMockTickClock(*delay);
+ per_run_time_callback.Run();
+ } else {
+ break;
+ }
+ }
+ RunPendingTasks();
+ }
+ }
+
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+// SequenceManagerImpl is being initialized with real MessageLoop
+// at cost of less control over a task runner.
+// It also runs a version with experimental MessagePump support.
+// TODO(kraynov): Generalize as many tests as possible to run it
+// in all supported environments.
+class SequenceManagerTestWithMessageLoop : public SequenceManagerTestBase {
+ protected:
+ void SetUp() override {
+ switch (GetParam()) {
+ case TestType::kUseMessageLoop:
+ SetUpWithMessageLoop();
+ break;
+ case TestType::kUseMessagePump:
+ SetUpWithMessagePump();
+ break;
+ default:
+ FAIL();
+ }
+ }
+
+ void SetUpWithMessageLoop() {
+ message_loop_.reset(new MessageLoop());
+ // A null clock triggers some assertions.
+ mock_clock_.Advance(TimeDelta::FromMilliseconds(1));
+ start_time_ = mock_clock_.NowTicks();
+
+ manager_ = SequenceManagerForTest::Create(
+ message_loop_.get(), ThreadTaskRunnerHandle::Get(), &mock_clock_);
+ }
+
+ void SetUpWithMessagePump() {
+ mock_clock_.Advance(TimeDelta::FromMilliseconds(1));
+ start_time_ = mock_clock_.NowTicks();
+ manager_ = std::make_unique<SequenceManagerForTest>(
+ std::make_unique<ThreadControllerWithMessagePumpImpl>(&mock_clock_));
+    // ThreadControllerWithMessagePumpImpl doesn't provide a default task
+    // runner.
+ scoped_refptr<TaskQueue> default_task_queue =
+ manager_->CreateTaskQueue<TestTaskQueue>(TaskQueue::Spec("default"));
+ manager_->SetDefaultTaskRunner(default_task_queue);
+ }
+
+ const TickClock* GetTickClock() { return &mock_clock_; }
+
+ std::unique_ptr<MessageLoop> message_loop_;
+ SimpleTestTickClock mock_clock_;
+};
+
+class SequenceManagerTestWithCustomInitialization
+ : public SequenceManagerTestWithMessageLoop {
+ protected:
+ void SetUp() override { ASSERT_EQ(GetParam(), TestType::kCustom); }
+};
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTest,
+ testing::Values(TestType::kUseMockTaskRunner));
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTestWithMessageLoop,
+ testing::Values(TestType::kUseMessageLoop,
+ TestType::kUseMessagePump));
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTestWithCustomInitialization,
+ testing::Values(TestType::kCustom));
+
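+// Posts each (closure, is_nestable) pair and then spins a nested run loop
+// until idle; entries flagged false are posted as non-nestable tasks.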
+void PostFromNestedRunloop(SingleThreadTaskRunner* runner,
+ std::vector<std::pair<OnceClosure, bool>>* tasks) {
+ for (std::pair<OnceClosure, bool>& pair : *tasks) {
+ if (pair.second) {
+ runner->PostTask(FROM_HERE, std::move(pair.first));
+ } else {
+ runner->PostNonNestableTask(FROM_HERE, std::move(pair.first));
+ }
+ }
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+}
+
+void NopTask() {}
+
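+// A TickClock that counts how many times NowTicks() is queried, used to
+// verify how often the manager samples the clock.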
+class TestCountUsesTimeSource : public TickClock {
+ public:
+ TestCountUsesTimeSource() = default;
+ ~TestCountUsesTimeSource() override = default;
+
+ TimeTicks NowTicks() const override {
+ now_calls_count_++;
+ // Don't return 0, as it triggers some assertions.
+ return TimeTicks() + TimeDelta::FromSeconds(1);
+ }
+
+ int now_calls_count() const { return now_calls_count_; }
+
+ private:
+ mutable int now_calls_count_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(TestCountUsesTimeSource);
+};
+
+TEST_P(SequenceManagerTestWithCustomInitialization,
+ NowCalledMinimumNumberOfTimesToComputeTaskDurations) {
+ message_loop_.reset(new MessageLoop());
+  // The SequenceManager only holds a raw pointer to this clock, so we keep
+  // the object alive here to read out how many times Now() was called.
+ TestCountUsesTimeSource test_count_uses_time_source;
+
+ manager_ = SequenceManagerForTest::Create(
+ nullptr, ThreadTaskRunnerHandle::Get(), &test_count_uses_time_source);
+ manager_->SetWorkBatchSize(6);
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+ for (size_t i = 0; i < 3; i++)
+ runners_.push_back(CreateTaskQueue());
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ RunLoop().RunUntilIdle();
+  // Now() is called each time a task is queued, when a task starts running,
+  // and when a task is completed: 6 tasks * 3 calls = 18 calls.
+ EXPECT_EQ(18, test_count_uses_time_source.now_calls_count());
+}
+
+void NullTask() {}
+
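+// Appends |value| to |out_result| so tests can assert on execution order.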
+void TestTask(uint64_t value, std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+}
+
+void DisableQueueTestTask(uint64_t value,
+ std::vector<EnqueueOrder>* out_result,
+ TaskQueue::QueueEnabledVoter* voter) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+ voter->SetQueueEnabled(false);
+}
+
+TEST_P(SequenceManagerTest, SingleQueuePosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, MultiQueuePosting) {
+ CreateTaskQueues(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, NonNestableTaskPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ NonNestableTaskExecutesInExpectedOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ BindOnce(&TestTask, 5, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ NonNestableTasksDoesntExecuteInNestedLoop) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 3, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 4, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 5, &run_order), true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 6, &run_order), true));
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+
+ RunLoop().RunUntilIdle();
+ // Note we expect tasks 3 & 4 to run last because they're non-nestable.
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 5u, 6u, 3u, 4u));
+}
+
+namespace {
+
+void InsertFenceAndPostTestTask(int id,
+ std::vector<EnqueueOrder>* run_order,
+ scoped_refptr<TestTaskQueue> task_queue) {
+ run_order->push_back(EnqueueOrder::FromIntForTesting(id));
+ task_queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ task_queue->PostTask(FROM_HERE, BindOnce(&TestTask, id + 1, run_order));
+
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ task_queue->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop, TaskQueueDisabledFromNestedLoop) {
+ CreateTaskQueues(1u);
+ std::vector<EnqueueOrder> run_order;
+
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 1, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(std::make_pair(
+ BindOnce(&InsertFenceAndPostTestTask, 2, &run_order, runners_[0]), true));
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ RunLoop().RunUntilIdle();
+
+  // Task 1 shouldn't run first because it's non-nestable, and the queue gets
+  // blocked after task 2. Task 1 runs after exiting the nested message loop
+  // because it was posted before the fence was inserted.
+  // This test checks behaviour that would break if a nestable task were
+  // pushed onto a redo queue.
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u));
+
+ runners_[0]->RemoveFence();
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u, 3u));
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_ImmediateTask) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |immediate_work_queue|.
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(
+ runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_DelayedTask) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ test_task_runner_->AdvanceMockTickClock(delay);
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |delayed_work_queue|.
+ LazyNow lazy_now(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->delayed_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(10),
+ test_task_runner_->NextPendingTaskDelay());
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ EXPECT_TRUE(run_order.empty());
+
+ // The task doesn't run before the delay has completed.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(9));
+ EXPECT_TRUE(run_order.empty());
+
+ // After the delay has completed, the task runs normally.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1u));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskExecutedInOneMessageLoopTask) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(10));
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+ EXPECT_EQ(0u, test_task_runner_->GetPendingTaskCount());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting_MultipleTasks_DecendingOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(8));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(5));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(5),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(3u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(3),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(3));
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(2));
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u, 1u));
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting_MultipleTasks_AscendingOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(1));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(5));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(4),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(4));
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(5),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, PostDelayedTask_SharesUnderlyingDelayedTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ delay);
+
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+}
+
+class TestObject {
+ public:
+ ~TestObject() { destructor_count__++; }
+
+ void Run() { FAIL() << "TestObject::Run should not be called"; }
+
+ static int destructor_count__;
+};
+
+int TestObject::destructor_count__ = 0;
+
+TEST_P(SequenceManagerTest, PendingDelayedTasksRemovedOnShutdown) {
+ CreateTaskQueues(1u);
+
+ TestObject::destructor_count__ = 0;
+
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&TestObject::Run, Owned(new TestObject())), delay);
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&TestObject::Run, Owned(new TestObject())));
+
+ manager_.reset();
+
+ EXPECT_EQ(2, TestObject::destructor_count__);
+}
+
+TEST_P(SequenceManagerTest, InsertAndRemoveFence) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+  // Posting a task while the queue is fenced doesn't result in work getting
+  // posted.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+ // However polling still works.
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // After removing the fence the task runs normally.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceForDisabledQueueDoesNotPostDoWork) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+TEST_P(SequenceManagerTest, EnablingFencedQueueDoesNotPostDoWork) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ voter->SetQueueEnabled(true);
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_BeforePosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_AfterPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ voter->SetQueueEnabled(false);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_AfterRemovingFence) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ runners_[0]->RemoveFence();
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceWithDelayedTask) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+  // Posting a delayed task when fenced will apply the delay, but won't cause
+  // work to be executed afterwards.
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+  // The task does not run even though its delay is up.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the task to run.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceWithMultipleDelayedTasks) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+  // Posting a delayed task when fenced will apply the delay, but won't cause
+  // work to be executed afterwards.
+ TimeDelta delay1(TimeDelta::FromMilliseconds(1));
+ TimeDelta delay2(TimeDelta::FromMilliseconds(10));
+ TimeDelta delay3(TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay1);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay2);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ delay3);
+
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(15));
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the ready tasks to run.
+ runners_[0]->RemoveFence();
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, InsertFencePreventsDelayedTasksFromRunning) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+}
+
+TEST_P(SequenceManagerTest, MultipleFences) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
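+  // Tasks 1 and 2 were posted before the fence and run; task 3 is blocked.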
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ // Subsequent tasks should be blocked.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, InsertFenceThenImmediatelyRemoveDoesNotBlock) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->RemoveFence();
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, InsertFencePostThenRemoveDoesNotBlock) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->RemoveFence();
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, MultipleFencesWithInitiallyEmptyQueue) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, BlockedByFence) {
+ CreateTaskQueues(1u);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+}
+
+TEST_P(SequenceManagerTest, BlockedByFence_BothTypesOfFence) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+}
+
+namespace {
+
+void RecordTimeTask(std::vector<TimeTicks>* run_times, const TickClock* clock) {
+ run_times->push_back(clock->NowTicks());
+}
+
+void RecordTimeAndQueueTask(
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, TimeTicks>>* run_times,
+ scoped_refptr<TestTaskQueue> task_queue,
+ const TickClock* clock) {
+ run_times->emplace_back(task_queue, clock->NowTicks());
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTest, DelayedFence_DelayedTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(100));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(200));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(300));
+
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(300)));
+}
+
+TEST_P(SequenceManagerTest, DelayedFence_ImmediateTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
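+  // Each iteration posts an immediate task and then advances time by 100ms,
+  // so the fence at 250ms activates during the third iteration.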
+ for (int i = 0; i < 5; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ if (i < 2) {
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ } else {
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ }
+ }
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(start_time_, start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(500),
+ start_time_ + TimeDelta::FromMilliseconds(500)));
+}
+
+TEST_P(SequenceManagerTest, DelayedFence_RemovedFenceDoesNotActivate) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
+ for (int i = 0; i < 3; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ }
+
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ runners_[0]->RemoveFence();
+
+ for (int i = 0; i < 2; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ }
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(start_time_, start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200),
+ start_time_ + TimeDelta::FromMilliseconds(300),
+ start_time_ + TimeDelta::FromMilliseconds(400)));
+}
+
+TEST_P(SequenceManagerTest, DelayedFence_TakeIncomingImmediateQueue) {
+ // This test checks that everything works correctly when a work queue
+ // is swapped with an immediate incoming queue and a delayed fence
+ // is activated, forcing a different queue to become active.
+ CreateTaskQueues(2u);
+
+ scoped_refptr<TestTaskQueue> queue1 = runners_[0];
+ scoped_refptr<TestTaskQueue> queue2 = runners_[1];
+
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, TimeTicks>> run_times;
+
+ // Fence ensures that the task posted after advancing time is blocked.
+ queue1->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
+  // This task should not be blocked and should run immediately after time is
+  // advanced to the 300ms mark.
+ queue1->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue1, GetTickClock()));
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ queue1->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(300));
+
+ // This task should be blocked.
+ queue1->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue1, GetTickClock()));
+ // This task on a different runner should run as expected.
+ queue2->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue2, GetTickClock()));
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(std::make_pair(
+ queue1, start_time_ + TimeDelta::FromMilliseconds(300)),
+ std::make_pair(
+ queue2, start_time_ + TimeDelta::FromMilliseconds(300))));
+}
+
+namespace {
+
+void ReentrantTestTask(scoped_refptr<SingleThreadTaskRunner> runner,
+ int countdown,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(countdown));
+ if (--countdown) {
+ runner->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runner, countdown, out_result));
+ }
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTest, ReentrantPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runners_[0], 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u, 1u));
+}
+
+TEST_P(SequenceManagerTest, NoTasksAfterShutdown) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ manager_.reset();
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+}
+
+void PostTaskToRunner(scoped_refptr<SingleThreadTaskRunner> runner,
+ std::vector<EnqueueOrder>* run_order) {
+ runner->PostTask(FROM_HERE, BindOnce(&TestTask, 1, run_order));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, PostFromThread) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ Thread thread("TestThread");
+ thread.Start();
+ thread.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&PostTaskToRunner, runners_[0], &run_order));
+ thread.Stop();
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+void RePostingTestTask(scoped_refptr<SingleThreadTaskRunner> runner,
+ int* run_count) {
+ (*run_count)++;
+ runner->PostTask(FROM_HERE, BindOnce(&RePostingTestTask,
+ Unretained(runner.get()), run_count));
+}
+
+TEST_P(SequenceManagerTest, DoWorkCantPostItselfMultipleTimes) {
+ CreateTaskQueues(1u);
+
+ int run_count = 0;
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&RePostingTestTask, runners_[0], &run_count));
+
+ RunPendingTasks();
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ EXPECT_EQ(1, run_count);
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, PostFromNestedRunloop) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 1, &run_order), true));
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 0, &run_order));
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(0u, 2u, 1u));
+}
+
+TEST_P(SequenceManagerTest, WorkBatching) {
+ CreateTaskQueues(1u);
+
+ manager_->SetWorkBatchSize(2);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+ // Running one task in the host message loop should cause two posted tasks to
+ // get executed.
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ // The second task runs the remaining two posted tasks.
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+}
+
+class MockTaskObserver : public MessageLoop::TaskObserver {
+ public:
+ MOCK_METHOD1(DidProcessTask, void(const PendingTask& task));
+ MOCK_METHOD1(WillProcessTask, void(const PendingTask& task));
+};
+
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverAdding) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(2);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(2);
+ RunLoop().RunUntilIdle();
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverRemoving) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+ manager_->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+void RemoveObserverTask(SequenceManagerImpl* manager,
+ MessageLoop::TaskObserver* observer) {
+ manager->RemoveTaskObserver(observer);
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverRemovingInsideTask) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(3);
+ manager_->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RemoveObserverTask, manager_.get(), &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, QueueTaskObserverAdding) {
+ CreateTaskQueues(2u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(1);
+ RunLoop().RunUntilIdle();
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, QueueTaskObserverRemoving) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+ runners_[0]->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+
+ RunLoop().RunUntilIdle();
+}
+
+void RemoveQueueObserverTask(scoped_refptr<TaskQueue> queue,
+ MessageLoop::TaskObserver* observer) {
+ queue->RemoveTaskObserver(observer);
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ QueueTaskObserverRemovingInsideTask) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ runners_[0]->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RemoveQueueObserverTask, runners_[0], &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+TEST_P(SequenceManagerTest, ThreadCheckAfterTermination) {
+ CreateTaskQueues(1u);
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+ manager_.reset();
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+}
+
+TEST_P(SequenceManagerTest, TimeDomain_NextScheduledRunTime) {
+ CreateTaskQueues(2u);
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMicroseconds(10000));
+ LazyNow lazy_now_1(GetTickClock());
+
+ // With no delayed tasks.
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With a non-delayed task.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With a delayed task.
+ TimeDelta expected_delay = TimeDelta::FromMilliseconds(50);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in the same queue with a longer delay.
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(100));
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in the same queue with a shorter delay.
+ expected_delay = TimeDelta::FromMilliseconds(20);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in a different queue with a shorter delay.
+ expected_delay = TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+  // Test that it updates as time progresses.
+ test_task_runner_->AdvanceMockTickClock(expected_delay);
+ LazyNow lazy_now_2(GetTickClock());
+ EXPECT_EQ(TimeDelta(),
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_2));
+}
+
+TEST_P(SequenceManagerTest, TimeDomain_NextScheduledRunTime_MultipleQueues) {
+ CreateTaskQueues(3u);
+
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(50);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(5);
+ TimeDelta delay3 = TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay2);
+ runners_[2]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay3);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(delay2,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now));
+}
+
+TEST_P(SequenceManagerTest, DeleteSequenceManagerInsideATask) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&SequenceManagerTest::DeleteSequenceManagerTask,
+ Unretained(this)));
+
+ // This should not crash, assuming DoWork detects the SequenceManager has
+ // been deleted.
+ RunLoop().RunUntilIdle();
+}
+
+TEST_P(SequenceManagerTest, GetAndClearSystemIsQuiescentBit) {
+ CreateTaskQueues(3u);
+
+ scoped_refptr<TaskQueue> queue0 =
+ CreateTaskQueue(TaskQueue::Spec("test").SetShouldMonitorQuiescence(true));
+ scoped_refptr<TaskQueue> queue1 =
+ CreateTaskQueue(TaskQueue::Spec("test").SetShouldMonitorQuiescence(true));
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue1->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue2->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, BindOnce(&NopTask));
+ queue1->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork) {
+ CreateTaskQueues(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(NullTask));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_DelayedTasks) {
+ CreateTaskQueues(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(NullTask),
+ TimeDelta::FromMilliseconds(12));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move time forwards until just before the delayed task should run.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(10));
+ LazyNow lazy_now_1(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now_1);
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Force the delayed task onto the work queue.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(2));
+ LazyNow lazy_now_2(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now_2);
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+void ExpensiveTestTask(int value,
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+ test_task_runner->FastForwardBy(TimeDelta::FromMilliseconds(1));
+}
+
+TEST_P(SequenceManagerTest, ImmediateAndDelayedTaskInterleaving) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ for (int i = 10; i < 19; i++) {
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&ExpensiveTestTask, i, test_task_runner_, &run_order), delay);
+ }
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+
+ for (int i = 0; i < 9; i++) {
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ExpensiveTestTask, i,
+ test_task_runner_, &run_order));
+ }
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  // Delayed tasks are not allowed to starve out immediate work, which is why
+  // some of the immediate tasks run out of order.
+ uint64_t expected_run_order[] = {10u, 11u, 12u, 13u, 0u, 14u, 15u, 16u, 1u,
+ 17u, 18u, 2u, 3u, 4u, 5u, 6u, 7u, 8u};
+ EXPECT_THAT(run_order, ElementsAreArray(expected_run_order));
+}
+
+TEST_P(SequenceManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_SameQueue) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->AdvanceMockTickClock(delay * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 3u, 1u));
+}
+
+TEST_P(SequenceManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_DifferentQueues) {
+ CreateTaskQueues(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->AdvanceMockTickClock(delay * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 3u, 1u));
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskDoesNotSkipAHeadOfShorterDelayedTask) {
+ CreateTaskQueues(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(5);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay1);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay2);
+
+ test_task_runner_->AdvanceMockTickClock(delay1 * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u));
+}
+
+void CheckIsNested(bool* is_nested) {
+ *is_nested = RunLoop::IsNestedOnCurrentThread();
+}
+
+void PostAndQuitFromNestedRunloop(RunLoop* run_loop,
+ SingleThreadTaskRunner* runner,
+ bool* was_nested) {
+ runner->PostTask(FROM_HERE, run_loop->QuitClosure());
+ runner->PostTask(FROM_HERE, BindOnce(&CheckIsNested, was_nested));
+ run_loop->Run();
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, QuitWhileNested) {
+ // This test makes sure we don't continue running a work batch after a nested
+ // run loop has been exited in the middle of the batch.
+ CreateTaskQueues(1u);
+ manager_->SetWorkBatchSize(2);
+
+ bool was_nested = true;
+ RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostAndQuitFromNestedRunloop, Unretained(&run_loop),
+ RetainedRef(runners_[0]), Unretained(&was_nested)));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(was_nested);
+}
+
+class SequenceNumberCapturingTaskObserver : public MessageLoop::TaskObserver {
+ public:
+ // MessageLoop::TaskObserver overrides.
+ void WillProcessTask(const PendingTask& pending_task) override {}
+ void DidProcessTask(const PendingTask& pending_task) override {
+ sequence_numbers_.push_back(pending_task.sequence_num);
+ }
+
+ const std::vector<int>& sequence_numbers() const { return sequence_numbers_; }
+
+ private:
+ std::vector<int> sequence_numbers_;
+};
+
+TEST_P(SequenceManagerTest, SequenceNumSetWhenTaskIsPosted) {
+ CreateTaskQueues(1u);
+
+ SequenceNumberCapturingTaskObserver observer;
+ manager_->AddTaskObserver(&observer);
+
+ // Register four tasks that will run in reverse order.
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(4u, 3u, 2u, 1u));
+
+  // The sequence numbers are a one-based, monotonically incrementing counter
+  // which should be set when the task is posted rather than when it's enqueued
+  // onto the incoming queue. This counter starts at 2.
+ EXPECT_THAT(observer.sequence_numbers(), ElementsAre(5, 4, 3, 2));
+
+ manager_->RemoveTaskObserver(&observer);
+}
+
+TEST_P(SequenceManagerTest, NewTaskQueues) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, ShutdownTaskQueue) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ queue2->ShutdownTaskQueue();
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1u, 3u));
+}
+
+TEST_P(SequenceManagerTest, ShutdownTaskQueue_WithDelayedTasks) {
+ CreateTaskQueues(2u);
+
+  // Register three delayed tasks.
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->ShutdownTaskQueue();
+ RunLoop().RunUntilIdle();
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(1u, 3u));
+}
+
+namespace {
+void ShutdownQueue(scoped_refptr<TaskQueue> queue) {
+ queue->ShutdownTaskQueue();
+}
+} // namespace
+
+TEST_P(SequenceManagerTest, ShutdownTaskQueue_InTasks) {
+ CreateTaskQueues(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ShutdownQueue, runners_[1]));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ShutdownQueue, runners_[2]));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ ASSERT_THAT(run_order, ElementsAre(1u));
+}
+
+namespace {
+
+class MockObserver : public SequenceManager::Observer {
+ public:
+ MOCK_METHOD0(OnTriedToExecuteBlockedTask, void());
+ MOCK_METHOD0(OnBeginNestedRunLoop, void());
+ MOCK_METHOD0(OnExitNestedRunLoop, void());
+};
+
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop, ShutdownTaskQueueInNestedLoop) {
+ CreateTaskQueues(1u);
+
+ // We retain a reference to the task queue even when the manager has deleted
+ // its reference.
+ scoped_refptr<TaskQueue> task_queue = CreateTaskQueue();
+
+ std::vector<bool> log;
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+
+  // Inside a nested run loop, call task_queue->ShutdownTaskQueue(), bookended
+  // by no-op tasks, and check that shutting down a queue from within a nested
+  // run loop doesn't crash.
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&NopTask), true));
+ tasks_to_post_from_nested_loop.push_back(std::make_pair(
+ BindOnce(&TaskQueue::ShutdownTaskQueue, Unretained(task_queue.get())),
+ true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&NopTask), true));
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ RunLoop().RunUntilIdle();
+
+ // Just make sure that we don't crash.
+}
+
+TEST_P(SequenceManagerTest, TimeDomainsAreIndependent) {
+ CreateTaskQueues(2u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[1]->SetTimeDomain(domain_b.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ domain_b->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4u, 5u, 6u));
+
+ domain_a->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4u, 5u, 6u, 1u, 2u, 3u));
+
+ runners_[0]->ShutdownTaskQueue();
+ runners_[1]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+TEST_P(SequenceManagerTest, TimeDomainMigration) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(40));
+
+ domain_a->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(20));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ domain_b->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+TEST_P(SequenceManagerTest, TimeDomainMigrationWithIncomingImmediateTasks) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+TEST_P(SequenceManagerTest,
+ PostDelayedTasksReverseOrderAlternatingTimeDomains) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+
+ std::unique_ptr<internal::RealTimeDomain> domain_a =
+ std::make_unique<internal::RealTimeDomain>();
+ std::unique_ptr<internal::RealTimeDomain> domain_b =
+ std::make_unique<internal::RealTimeDomain>();
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(40));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(20));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ EXPECT_THAT(run_order, ElementsAre(4u, 3u, 2u, 1u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+namespace {
+
+class MockTaskQueueObserver : public TaskQueue::Observer {
+ public:
+ ~MockTaskQueueObserver() override = default;
+
+ MOCK_METHOD2(OnQueueNextWakeUpChanged, void(TaskQueue*, TimeTicks));
+};
+
+} // namespace
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_ImmediateTask) {
+ CreateTaskQueues(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a task is posted on an empty queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // But not subsequently.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Unless the immediate work queue is emptied.
+ runners_[0]->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTask) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time = manager_->NowTicks();
+ TimeDelta delay10s(TimeDelta::FromSeconds(10));
+ TimeDelta delay100s(TimeDelta::FromSeconds(100));
+ TimeDelta delay1s(TimeDelta::FromSeconds(1));
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a delayed task is posted on an empty
+ // queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay10s));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay10s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // We should not get a notification for a longer delay.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay100s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // We should get a notification for a shorter delay.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // When a queue has been enabled, we may get a notification if the
+ // TimeDomain's next scheduled wake-up has changed.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ voter->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTaskMultipleQueues) {
+ CreateTaskQueues(2u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+ runners_[1]->SetObserver(&observer);
+
+ TimeTicks start_time = manager_->NowTicks();
+ TimeDelta delay1s(TimeDelta::FromSeconds(1));
+ TimeDelta delay10s(TimeDelta::FromSeconds(10));
+
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1s))
+ .Times(1);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+ start_time + delay10s))
+ .Times(1);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay10s);
+ testing::Mock::VerifyAndClearExpectations(&observer);
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+ runners_[1]->CreateQueueEnabledVoter();
+
+ // Disabling a queue should not trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ voter0->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+  // Re-enabling it should trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ voter0->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Disabling a queue should not trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ voter1->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+  // Re-enabling it should trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+ start_time + delay10s));
+ voter1->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(AnyNumber());
+ runners_[0]->ShutdownTaskQueue();
+ runners_[1]->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
+  // This test checks that the notification still fires when delayed work
+  // becomes available. This usually happens when time advances and a task
+  // becomes available in the middle of the scheduling code. For this test we
+  // rely on the fact that the notification dispatching code is the same in
+  // all conditions and simply change the time domain to trigger the
+  // notification.
+
+ CreateTaskQueues(1u);
+
+ TimeDelta delay1s(TimeDelta::FromSeconds(1));
+ TimeDelta delay10s(TimeDelta::FromSeconds(10));
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a delayed task is posted on an empty
+ // queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ std::unique_ptr<TimeDomain> mock_time_domain =
+ std::make_unique<internal::RealTimeDomain>();
+ manager_->RegisterTimeDomain(mock_time_domain.get());
+
+ test_task_runner_->AdvanceMockTickClock(delay10s);
+
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ runners_[0]->SetTimeDomain(mock_time_domain.get());
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+class CancelableTask {
+ public:
+ explicit CancelableTask(const TickClock* clock)
+ : clock_(clock), weak_factory_(this) {}
+
+ void RecordTimeTask(std::vector<TimeTicks>* run_times) {
+ run_times->push_back(clock_->NowTicks());
+ }
+
+ const TickClock* clock_;
+ WeakPtrFactory<CancelableTask> weak_factory_;
+};
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
+ CreateTaskQueues(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ TimeTicks start_time = manager_->NowTicks();
+ TimeDelta delay1(TimeDelta::FromSeconds(5));
+ TimeDelta delay2(TimeDelta::FromSeconds(10));
+
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1))
+ .Times(1);
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+
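+  // Cancel task1 so that, after sweeping, the queue's earliest wake-up
+  // becomes task2's run time.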
+ task1.weak_factory_.InvalidateWeakPtrs();
+
+ // Sweeping away canceled delayed tasks should trigger a notification.
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay2))
+ .Times(1);
+ manager_->SweepCanceledDelayedTasks();
+}
+
+namespace {
+void ChromiumRunloopInspectionTask(
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner) {
+ // We don't expect more than 1 pending task at any time.
+ EXPECT_GE(1u, test_task_runner->GetPendingTaskCount());
+}
+} // namespace
+
+TEST_P(SequenceManagerTest, NumberOfPendingTasksOnChromiumRunLoop) {
+ CreateTaskQueues(1u);
+
+  // NOTE: because tasks posted to the Chromium run loop are not cancellable,
+  // we would end up with a lot more tasks posted if the delayed tasks were
+  // posted in reverse order.
+ // TODO(alexclarke): Consider talking to the message pump directly.
+ for (int i = 1; i < 100; i++) {
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&ChromiumRunloopInspectionTask, test_task_runner_),
+ TimeDelta::FromMilliseconds(i));
+ }
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+}
+
+namespace {
+
+class QuadraticTask {
+ public:
+ QuadraticTask(scoped_refptr<TaskQueue> task_queue,
+ TimeDelta delay,
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner)
+ : count_(0),
+ task_queue_(task_queue),
+ delay_(delay),
+ test_task_runner_(test_task_runner) {}
+
+ void SetShouldExit(RepeatingCallback<bool()> should_exit) {
+ should_exit_ = should_exit;
+ }
+
+ void Run() {
+ if (should_exit_.Run())
+ return;
+ count_++;
+ task_queue_->PostDelayedTask(
+ FROM_HERE, BindOnce(&QuadraticTask::Run, Unretained(this)), delay_);
+ task_queue_->PostDelayedTask(
+ FROM_HERE, BindOnce(&QuadraticTask::Run, Unretained(this)), delay_);
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ }
+
+ int Count() const { return count_; }
+
+ private:
+ int count_;
+ scoped_refptr<TaskQueue> task_queue_;
+ TimeDelta delay_;
+ RepeatingCallback<bool()> should_exit_;
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+class LinearTask {
+ public:
+ LinearTask(scoped_refptr<TaskQueue> task_queue,
+ TimeDelta delay,
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner)
+ : count_(0),
+ task_queue_(task_queue),
+ delay_(delay),
+ test_task_runner_(test_task_runner) {}
+
+ void SetShouldExit(RepeatingCallback<bool()> should_exit) {
+ should_exit_ = should_exit;
+ }
+
+ void Run() {
+ if (should_exit_.Run())
+ return;
+ count_++;
+ task_queue_->PostDelayedTask(
+ FROM_HERE, BindOnce(&LinearTask::Run, Unretained(this)), delay_);
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ }
+
+ int Count() const { return count_; }
+
+ private:
+ int count_;
+ scoped_refptr<TaskQueue> task_queue_;
+ TimeDelta delay_;
+ RepeatingCallback<bool()> should_exit_;
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+bool ShouldExit(QuadraticTask* quadratic_task, LinearTask* linear_task) {
+ return quadratic_task->Count() == 1000 || linear_task->Count() == 1000;
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTest,
+ DelayedTasksDontBadlyStarveNonDelayedWork_SameQueue) {
+ CreateTaskQueues(1u);
+
+ QuadraticTask quadratic_delayed_task(
+ runners_[0], TimeDelta::FromMilliseconds(10), test_task_runner_);
+ LinearTask linear_immediate_task(runners_[0], TimeDelta(), test_task_runner_);
+ RepeatingCallback<bool()> should_exit = BindRepeating(
+ ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+ quadratic_delayed_task.SetShouldExit(should_exit);
+ linear_immediate_task.SetShouldExit(should_exit);
+
+ quadratic_delayed_task.Run();
+ linear_immediate_task.Run();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ double ratio = static_cast<double>(linear_immediate_task.Count()) /
+ static_cast<double>(quadratic_delayed_task.Count());
+
+ EXPECT_GT(ratio, 0.333);
+ EXPECT_LT(ratio, 1.1);
+}
+
+TEST_P(SequenceManagerTest, ImmediateWorkCanStarveDelayedTasks_SameQueue) {
+ CreateTaskQueues(1u);
+
+ QuadraticTask quadratic_immediate_task(runners_[0], TimeDelta(),
+ test_task_runner_);
+ LinearTask linear_delayed_task(runners_[0], TimeDelta::FromMilliseconds(10),
+ test_task_runner_);
+ RepeatingCallback<bool()> should_exit = BindRepeating(
+ &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+ quadratic_immediate_task.SetShouldExit(should_exit);
+ linear_delayed_task.SetShouldExit(should_exit);
+
+ quadratic_immediate_task.Run();
+ linear_delayed_task.Run();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ double ratio = static_cast<double>(linear_delayed_task.Count()) /
+ static_cast<double>(quadratic_immediate_task.Count());
+
+  // This is by design: we want to enforce a strict ordering in task execution
+  // whereby delayed tasks cannot skip ahead of non-delayed work.
+ EXPECT_GT(ratio, 0.0);
+ EXPECT_LT(ratio, 0.1);
+}
+
+TEST_P(SequenceManagerTest,
+ DelayedTasksDontBadlyStarveNonDelayedWork_DifferentQueue) {
+ CreateTaskQueues(2u);
+
+ QuadraticTask quadratic_delayed_task(
+ runners_[0], TimeDelta::FromMilliseconds(10), test_task_runner_);
+ LinearTask linear_immediate_task(runners_[1], TimeDelta(), test_task_runner_);
+ RepeatingCallback<bool()> should_exit = BindRepeating(
+ ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+ quadratic_delayed_task.SetShouldExit(should_exit);
+ linear_immediate_task.SetShouldExit(should_exit);
+
+ quadratic_delayed_task.Run();
+ linear_immediate_task.Run();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ double ratio = static_cast<double>(linear_immediate_task.Count()) /
+ static_cast<double>(quadratic_delayed_task.Count());
+
+ EXPECT_GT(ratio, 0.333);
+ EXPECT_LT(ratio, 1.1);
+}
+
+TEST_P(SequenceManagerTest, ImmediateWorkCanStarveDelayedTasks_DifferentQueue) {
+ CreateTaskQueues(2u);
+
+ QuadraticTask quadratic_immediate_task(runners_[0], TimeDelta(),
+ test_task_runner_);
+ LinearTask linear_delayed_task(runners_[1], TimeDelta::FromMilliseconds(10),
+ test_task_runner_);
+ RepeatingCallback<bool()> should_exit = BindRepeating(
+ &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+ quadratic_immediate_task.SetShouldExit(should_exit);
+ linear_delayed_task.SetShouldExit(should_exit);
+
+ quadratic_immediate_task.Run();
+ linear_delayed_task.Run();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ double ratio = static_cast<double>(linear_delayed_task.Count()) /
+ static_cast<double>(quadratic_immediate_task.Count());
+
+  // This is by design: we want to enforce a strict ordering in task execution
+  // whereby delayed tasks cannot skip ahead of non-delayed work.
+ EXPECT_GT(ratio, 0.0);
+ EXPECT_LT(ratio, 0.1);
+}
+
+TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_NoTaskRunning) {
+ CreateTaskQueues(1u);
+
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+void CurrentlyExecutingTaskQueueTestTask(
+ SequenceManagerImpl* sequence_manager,
+ std::vector<internal::TaskQueueImpl*>* task_sources) {
+ task_sources->push_back(sequence_manager->currently_executing_task_queue());
+}
+} // namespace
+
+TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_TaskRunning) {
+ CreateTaskQueues(2u);
+
+ TestTaskQueue* queue0 = runners_[0].get();
+ TestTaskQueue* queue1 = runners_[1].get();
+
+ std::vector<internal::TaskQueueImpl*> task_sources;
+ queue0->PostTask(FROM_HERE, BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources));
+ queue1->PostTask(FROM_HERE, BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(task_sources, ElementsAre(queue0->GetTaskQueueImpl(),
+ queue1->GetTaskQueueImpl()));
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+void RunloopCurrentlyExecutingTaskQueueTestTask(
+ SequenceManagerImpl* sequence_manager,
+ std::vector<internal::TaskQueueImpl*>* task_sources,
+ std::vector<std::pair<OnceClosure, TestTaskQueue*>>* tasks) {
+ task_sources->push_back(sequence_manager->currently_executing_task_queue());
+
+ for (std::pair<OnceClosure, TestTaskQueue*>& pair : *tasks) {
+ pair.second->PostTask(FROM_HERE, std::move(pair.first));
+ }
+
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+ task_sources->push_back(sequence_manager->currently_executing_task_queue());
+}
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ CurrentlyExecutingTaskQueue_NestedLoop) {
+ CreateTaskQueues(3u);
+
+ TestTaskQueue* queue0 = runners_[0].get();
+ TestTaskQueue* queue1 = runners_[1].get();
+ TestTaskQueue* queue2 = runners_[2].get();
+
+ std::vector<internal::TaskQueueImpl*> task_sources;
+ std::vector<std::pair<OnceClosure, TestTaskQueue*>>
+ tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources),
+ queue1));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources),
+ queue2));
+
+ queue0->PostTask(
+ FROM_HERE,
+ BindOnce(&RunloopCurrentlyExecutingTaskQueueTestTask, manager_.get(),
+ &task_sources, &tasks_to_post_from_nested_loop));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(
+ task_sources,
+ ElementsAre(queue0->GetTaskQueueImpl(), queue1->GetTaskQueueImpl(),
+ queue2->GetTaskQueueImpl(), queue0->GetTaskQueueImpl()));
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, BlameContextAttribution) {
+ using trace_analyzer::Query;
+
+ CreateTaskQueues(1u);
+ TestTaskQueue* queue = runners_[0].get();
+
+ trace_analyzer::Start("*");
+ {
+ trace_event::BlameContext blame_context("cat", "name", "type", "scope", 0,
+ nullptr);
+ blame_context.Initialize();
+ queue->SetBlameContext(&blame_context);
+ queue->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ }
+ auto analyzer = trace_analyzer::Stop();
+
+ trace_analyzer::TraceEventVector events;
+ Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_ENTER_CONTEXT) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_LEAVE_CONTEXT);
+ analyzer->FindEvents(q, &events);
+
+ EXPECT_EQ(2u, events.size());
+}
+
+TEST_P(SequenceManagerTest, NoWakeUpsForCanceledDelayedTasks) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ CancelableTask task3(GetTickClock());
+ CancelableTask task4(GetTickClock());
+ TimeDelta delay1(TimeDelta::FromSeconds(5));
+ TimeDelta delay2(TimeDelta::FromSeconds(10));
+ TimeDelta delay3(TimeDelta::FromSeconds(15));
+ TimeDelta delay4(TimeDelta::FromSeconds(30));
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+
+ std::set<TimeTicks> wake_up_times;
+
+ RunUntilManagerIsIdle(BindRepeating(
+ [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, GetTickClock()));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay4));
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+TEST_P(SequenceManagerTest, NoWakeUpsForCanceledDelayedTasksReversePostOrder) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ CancelableTask task3(GetTickClock());
+ CancelableTask task4(GetTickClock());
+ TimeDelta delay1(TimeDelta::FromSeconds(5));
+ TimeDelta delay2(TimeDelta::FromSeconds(10));
+ TimeDelta delay3(TimeDelta::FromSeconds(15));
+ TimeDelta delay4(TimeDelta::FromSeconds(30));
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+
+ std::set<TimeTicks> wake_up_times;
+
+ RunUntilManagerIsIdle(BindRepeating(
+ [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, GetTickClock()));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay4));
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+TEST_P(SequenceManagerTest, TimeDomainWakeUpOnlyCancelledIfAllUsesCancelled) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ CancelableTask task3(GetTickClock());
+ CancelableTask task4(GetTickClock());
+ TimeDelta delay1(TimeDelta::FromSeconds(5));
+ TimeDelta delay2(TimeDelta::FromSeconds(10));
+ TimeDelta delay3(TimeDelta::FromSeconds(15));
+ TimeDelta delay4(TimeDelta::FromSeconds(30));
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ // Post a non-canceled task with |delay3|, so we should still get a wake-up at
+ // |delay3| even though we cancel |task3|.
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask, Unretained(&task3), &run_times),
+ delay3);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+ task1.weak_factory_.InvalidateWeakPtrs();
+
+ std::set<TimeTicks> wake_up_times;
+
+ RunUntilManagerIsIdle(BindRepeating(
+ [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, GetTickClock()));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay3,
+ start_time + delay4));
+
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay3, start_time + delay4));
+}
+
+TEST_P(SequenceManagerTest, TaskQueueVoters) {
+ CreateTaskQueues(1u);
+
+ // The task queue should be initially enabled.
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter3 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter4 =
+ runners_[0]->CreateQueueEnabledVoter();
+
+ // Voters should initially vote for the queue to be enabled.
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ // If any voter wants to disable, the queue is disabled.
+ voter1->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If the voter is deleted then the queue should be re-enabled.
+ voter1.reset();
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ // If any of the remaining voters wants to disable, the queue should be
+ // disabled.
+ voter2->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If another voter votes to disable, nothing happens because the queue is
+ // already disabled.
+ voter3->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // There are two votes to disable, so one of them voting to enable does
+ // nothing.
+ voter2->SetQueueEnabled(true);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If all voters vote to enable then the queue is enabled.
+ voter3->SetQueueEnabled(true);
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+}
+
+TEST_P(SequenceManagerTest, ShutdownQueueBeforeEnabledVoterDeleted) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ queue->CreateQueueEnabledVoter();
+
+ voter->SetQueueEnabled(true); // NOP
+ queue->ShutdownTaskQueue();
+
+ // This should complete without DCHECKing.
+ voter.reset();
+}
+
+TEST_P(SequenceManagerTest, ShutdownQueueBeforeDisabledVoterDeleted) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ queue->CreateQueueEnabledVoter();
+
+ voter->SetQueueEnabled(false);
+ queue->ShutdownTaskQueue();
+
+ // This should complete without DCHECKing.
+ voter.reset();
+}
+
+TEST_P(SequenceManagerTest, SweepCanceledDelayedTasks) {
+ CreateTaskQueues(1u);
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ CancelableTask task3(GetTickClock());
+ CancelableTask task4(GetTickClock());
+ TimeDelta delay1(TimeDelta::FromSeconds(5));
+ TimeDelta delay2(TimeDelta::FromSeconds(10));
+ TimeDelta delay3(TimeDelta::FromSeconds(15));
+ TimeDelta delay4(TimeDelta::FromSeconds(30));
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+ EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+
+ manager_->SweepCanceledDelayedTasks();
+ EXPECT_EQ(2u, runners_[0]->GetNumberOfPendingTasks());
+
+ task1.weak_factory_.InvalidateWeakPtrs();
+ task4.weak_factory_.InvalidateWeakPtrs();
+
+ manager_->SweepCanceledDelayedTasks();
+ EXPECT_EQ(0u, runners_[0]->GetNumberOfPendingTasks());
+}
+
+TEST_P(SequenceManagerTest, DelayTillNextTask) {
+ CreateTaskQueues(2u);
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromSeconds(10));
+
+ EXPECT_EQ(TimeDelta::FromSeconds(10), manager_->DelayTillNextTask(&lazy_now));
+
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromSeconds(15));
+
+ EXPECT_EQ(TimeDelta::FromSeconds(10), manager_->DelayTillNextTask(&lazy_now));
+
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromSeconds(5));
+
+ EXPECT_EQ(TimeDelta::FromSeconds(5), manager_->DelayTillNextTask(&lazy_now));
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+TEST_P(SequenceManagerTest, DelayTillNextTask_Disabled) {
+ CreateTaskQueues(1u);
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+TEST_P(SequenceManagerTest, DelayTillNextTask_Fence) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+TEST_P(SequenceManagerTest, DelayTillNextTask_FenceUnblocking) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+TEST_P(SequenceManagerTest, DelayTillNextTask_DelayedTaskReady) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromSeconds(1));
+
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromSeconds(10));
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+namespace {
+void MessageLoopTaskWithDelayedQuit(SimpleTestTickClock* now_src,
+ scoped_refptr<TaskQueue> task_queue) {
+ RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+ task_queue->PostDelayedTask(FROM_HERE, run_loop.QuitClosure(),
+ TimeDelta::FromMilliseconds(100));
+ now_src->Advance(TimeDelta::FromMilliseconds(200));
+ run_loop.Run();
+}
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop, DelayedTaskRunsInNestedMessageLoop) {
+ CreateTaskQueues(1u);
+ RunLoop run_loop;
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&MessageLoopTaskWithDelayedQuit, &mock_clock_,
+ RetainedRef(runners_[0])));
+ run_loop.RunUntilIdle();
+}
+
+namespace {
+void MessageLoopTaskWithImmediateQuit(OnceClosure non_nested_quit_closure,
+ scoped_refptr<TaskQueue> task_queue) {
+ RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+ // Needed because entering the nested run loop causes a DoWork to get
+ // posted.
+ task_queue->PostTask(FROM_HERE, BindOnce(&NopTask));
+ task_queue->PostTask(FROM_HERE, run_loop.QuitClosure());
+ run_loop.Run();
+ std::move(non_nested_quit_closure).Run();
+}
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ DelayedNestedMessageLoopDoesntPreventTasksRunning) {
+ CreateTaskQueues(1u);
+ RunLoop run_loop;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&MessageLoopTaskWithImmediateQuit, run_loop.QuitClosure(),
+ RetainedRef(runners_[0])),
+ TimeDelta::FromMilliseconds(100));
+
+ mock_clock_.Advance(TimeDelta::FromMilliseconds(200));
+ run_loop.Run();
+}
+
+TEST_P(SequenceManagerTest, CouldTaskRun_DisableAndReenable) {
+ CreateTaskQueues(1u);
+
+ EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ voter->SetQueueEnabled(true);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+TEST_P(SequenceManagerTest, CouldTaskRun_Fence) {
+ CreateTaskQueues(1u);
+
+ EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+TEST_P(SequenceManagerTest, CouldTaskRun_FenceBeforeThenAfter) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+TEST_P(SequenceManagerTest, DelayedDoWorkNotPostedForDisabledQueue) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(1));
+ ASSERT_TRUE(test_task_runner_->HasPendingTask());
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ test_task_runner_->NextPendingTaskDelay());
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+ voter->SetQueueEnabled(true);
+ ASSERT_TRUE(test_task_runner_->HasPendingTask());
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ test_task_runner_->NextPendingTaskDelay());
+}
+
+TEST_P(SequenceManagerTest, DisablingQueuesChangesDelayTillNextDoWork) {
+ CreateTaskQueues(3u);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(1));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(10));
+ runners_[2]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(100));
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+ runners_[1]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+ runners_[2]->CreateQueueEnabledVoter();
+
+ ASSERT_TRUE(test_task_runner_->HasPendingTask());
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ test_task_runner_->NextPendingTaskDelay());
+
+ voter0->SetQueueEnabled(false);
+ ASSERT_TRUE(test_task_runner_->HasPendingTask());
+ EXPECT_EQ(TimeDelta::FromMilliseconds(10),
+ test_task_runner_->NextPendingTaskDelay());
+
+ voter1->SetQueueEnabled(false);
+ ASSERT_TRUE(test_task_runner_->HasPendingTask());
+ EXPECT_EQ(TimeDelta::FromMilliseconds(100),
+ test_task_runner_->NextPendingTaskDelay());
+
+ voter2->SetQueueEnabled(false);
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+TEST_P(SequenceManagerTest, GetNextScheduledWakeUp) {
+ CreateTaskQueues(1u);
+
+ EXPECT_EQ(nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+ TimeTicks start_time = manager_->NowTicks();
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(2);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1);
+ EXPECT_EQ(start_time + delay1, runners_[0]->GetNextScheduledWakeUp());
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay2);
+ EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+ // We don't have wake-ups scheduled for disabled queues.
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ EXPECT_EQ(nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+ voter->SetQueueEnabled(true);
+ EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+ // Immediate tasks shouldn't make any difference.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+ // Neither should fences.
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+}
+
+TEST_P(SequenceManagerTest, SetTimeDomainForDisabledQueue) {
+ CreateTaskQueues(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(1));
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+
+ // We should not get a notification for a disabled queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+
+ std::unique_ptr<MockTimeDomain> domain =
+ std::make_unique<MockTimeDomain>(manager_->NowTicks());
+ manager_->RegisterTimeDomain(domain.get());
+ runners_[0]->SetTimeDomain(domain.get());
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+ manager_->UnregisterTimeDomain(domain.get());
+}
+
+namespace {
+void SetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue,
+ int* start_counter,
+ int* complete_counter) {
+ task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(BindRepeating(
+ [](int* counter, const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing) { ++(*counter); },
+ start_counter));
+ task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(BindRepeating(
+ [](int* counter, const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing) { ++(*counter); },
+ complete_counter));
+}
+
+void UnsetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue) {
+ task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(
+ internal::TaskQueueImpl::OnTaskStartedHandler());
+ task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(
+ internal::TaskQueueImpl::OnTaskCompletedHandler());
+}
+} // namespace
+
+TEST_P(SequenceManagerTest, ProcessTasksWithoutTaskTimeObservers) {
+ CreateTaskQueues(1u);
+ int start_counter = 0;
+ int complete_counter = 0;
+ std::vector<EnqueueOrder> run_order;
+ SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 3);
+ EXPECT_EQ(complete_counter, 3);
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+
+ UnsetOnTaskHandlers(runners_[0]);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 3);
+ EXPECT_EQ(complete_counter, 3);
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+}
+
+TEST_P(SequenceManagerTest, ProcessTasksWithTaskTimeObservers) {
+ CreateTaskQueues(1u);
+ int start_counter = 0;
+ int complete_counter = 0;
+
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+ SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 2);
+ EXPECT_EQ(complete_counter, 2);
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ UnsetOnTaskHandlers(runners_[0]);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 2);
+ EXPECT_EQ(complete_counter, 2);
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+
+ manager_->RemoveTaskTimeObserver(&test_task_time_observer_);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 2);
+ EXPECT_EQ(complete_counter, 2);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+
+ SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 7, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 8, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(start_counter, 4);
+ EXPECT_EQ(complete_counter, 4);
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u));
+ UnsetOnTaskHandlers(runners_[0]);
+}
+
+TEST_P(SequenceManagerTest, GracefulShutdown) {
+ std::vector<TimeTicks> run_times;
+ scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+ WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ for (int i = 1; i <= 5; ++i) {
+ main_tq->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(i * 100));
+ }
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+ main_tq = nullptr;
+ // Ensure that task queue went away.
+ EXPECT_FALSE(main_tq_weak_ptr.get());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ // Even with TaskQueue gone, tasks are executed.
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200),
+ start_time_ + TimeDelta::FromMilliseconds(300),
+ start_time_ + TimeDelta::FromMilliseconds(400),
+ start_time_ + TimeDelta::FromMilliseconds(500)));
+
+ EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+}
+
+TEST_P(SequenceManagerTest, GracefulShutdown_ManagerDeletedInFlight) {
+ std::vector<TimeTicks> run_times;
+ scoped_refptr<TestTaskQueue> control_tq = CreateTaskQueue();
+ std::vector<scoped_refptr<TestTaskQueue>> main_tqs;
+ std::vector<WeakPtr<TestTaskQueue>> main_tq_weak_ptrs;
+
+ // There might be a race condition - async task queues should be unregistered
+ // first. Increase the number of task queues to make such a race more likely to
+ // be detected. The problem is that pointers are compared in a set, and for a
+ // small number of allocations the pointer values tend to increase
+ // monotonically. 100 queues is large enough to force allocations from
+ // different pages.
+ const int N = 100;
+ for (int i = 0; i < N; ++i) {
+ scoped_refptr<TestTaskQueue> tq = CreateTaskQueue();
+ main_tq_weak_ptrs.push_back(tq->GetWeakPtr());
+ main_tqs.push_back(std::move(tq));
+ }
+
+ for (int i = 1; i <= 5; ++i) {
+ main_tqs[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(i * 100));
+ }
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+ main_tqs.clear();
+ // Ensure that task queues went away.
+ for (int i = 0; i < N; ++i) {
+ EXPECT_FALSE(main_tq_weak_ptrs[i].get());
+ }
+
+ // No leaks should occur if the TQM is destroyed before the shutdown task is
+ // processed, and TaskQueueImpl should be safely deleted on the correct
+ // thread.
+ manager_.reset();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+}
+
+TEST_P(SequenceManagerTest,
+ GracefulShutdown_ManagerDeletedWithQueuesToShutdown) {
+ std::vector<TimeTicks> run_times;
+ scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+ WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ for (int i = 1; i <= 5; ++i) {
+ main_tq->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(i * 100));
+ }
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+ main_tq = nullptr;
+ // Ensure that task queue went away.
+ EXPECT_FALSE(main_tq_weak_ptr.get());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ // Ensure that all queues-to-gracefully-shutdown are properly unregistered.
+ manager_.reset();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+}
+
+TEST_P(SequenceManagerTestWithCustomInitialization, DefaultTaskRunnerSupport) {
+ MessageLoop message_loop;
+ scoped_refptr<SingleThreadTaskRunner> original_task_runner =
+ message_loop.task_runner();
+ scoped_refptr<SingleThreadTaskRunner> custom_task_runner =
+ MakeRefCounted<TestSimpleTaskRunner>();
+ {
+ std::unique_ptr<SequenceManagerForTest> manager =
+ SequenceManagerForTest::Create(&message_loop,
+ message_loop.task_runner(), nullptr);
+ manager->SetDefaultTaskRunner(custom_task_runner);
+ DCHECK_EQ(custom_task_runner, message_loop.task_runner());
+ }
+ DCHECK_EQ(original_task_runner, message_loop.task_runner());
+}
+
+TEST_P(SequenceManagerTest, CanceledTasksInQueueCantMakeOtherTasksSkipAhead) {
+ CreateTaskQueues(2u);
+
+ CancelableTask task1(GetTickClock());
+ CancelableTask task2(GetTickClock());
+ std::vector<TimeTicks> run_times;
+
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times));
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times));
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ task1.weak_factory_.InvalidateWeakPtrs();
+ task2.weak_factory_.InvalidateWeakPtrs();
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, TaskQueueDeletedOnAnotherThread) {
+ std::vector<TimeTicks> run_times;
+ scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+ int start_counter = 0;
+ int complete_counter = 0;
+ SetOnTaskHandlers(main_tq, &start_counter, &complete_counter);
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ for (int i = 1; i <= 5; ++i) {
+ main_tq->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(i * 100));
+ }
+
+ // TODO(altimin): do not do this after switching to weak pointer-based
+ // task handlers.
+ UnsetOnTaskHandlers(main_tq);
+
+ WaitableEvent task_queue_deleted(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ std::unique_ptr<Thread> thread = std::make_unique<Thread>("test thread");
+ thread->StartAndWaitForTesting();
+
+ thread->task_runner()->PostTask(
+ FROM_HERE, BindOnce(
+ [](scoped_refptr<SingleThreadTaskRunner> task_queue,
+ WaitableEvent* task_queue_deleted) {
+ task_queue = nullptr;
+ task_queue_deleted->Signal();
+ },
+ std::move(main_tq), &task_queue_deleted));
+ task_queue_deleted.Wait();
+
+ EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ // Even with TaskQueue gone, tasks are executed.
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200),
+ start_time_ + TimeDelta::FromMilliseconds(300),
+ start_time_ + TimeDelta::FromMilliseconds(400),
+ start_time_ + TimeDelta::FromMilliseconds(500)));
+
+ EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+ EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+ EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+ thread->Stop();
+}
+
+namespace {
+
+class RunOnDestructionHelper {
+ public:
+ explicit RunOnDestructionHelper(base::OnceClosure task)
+ : task_(std::move(task)) {}
+
+ ~RunOnDestructionHelper() { std::move(task_).Run(); }
+
+ private:
+ base::OnceClosure task_;
+};
+
+base::OnceClosure RunOnDestruction(base::OnceClosure task) {
+ return base::BindOnce(
+ [](std::unique_ptr<RunOnDestructionHelper>) {},
+ base::Passed(std::make_unique<RunOnDestructionHelper>(std::move(task))));
+}
+
+base::OnceClosure PostOnDestruction(scoped_refptr<TestTaskQueue> task_queue,
+ base::OnceClosure task) {
+ return RunOnDestruction(base::BindOnce(
+ [](base::OnceClosure task, scoped_refptr<TestTaskQueue> task_queue) {
+ task_queue->PostTask(FROM_HERE, std::move(task));
+ },
+ base::Passed(std::move(task)), task_queue));
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTest, TaskQueueUsedInTaskDestructorAfterShutdown) {
+ // This test checks that when a task is posted to a shutdown queue and
+ // destroyed, it can try to post a task to the same queue without deadlocks.
+ scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+ WaitableEvent test_executed(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ std::unique_ptr<Thread> thread = std::make_unique<Thread>("test thread");
+ thread->StartAndWaitForTesting();
+
+ manager_.reset();
+
+ thread->task_runner()->PostTask(
+ FROM_HERE, BindOnce(
+ [](scoped_refptr<TestTaskQueue> task_queue,
+ WaitableEvent* test_executed) {
+ task_queue->PostTask(
+ FROM_HERE, PostOnDestruction(
+ task_queue, base::BindOnce([]() {})));
+ test_executed->Signal();
+ },
+ main_tq, &test_executed));
+ test_executed.Wait();
+}
+
+TEST_P(SequenceManagerTest, DestructorPostChainDuringShutdown) {
+ // Checks that a chain of closures which post other closures on destruction
+ // does the right thing on shutdown.
+ scoped_refptr<TestTaskQueue> task_queue = CreateTaskQueue();
+ bool run = false;
+ task_queue->PostTask(
+ FROM_HERE,
+ PostOnDestruction(
+ task_queue,
+ PostOnDestruction(task_queue,
+ RunOnDestruction(base::BindOnce(
+ [](bool* run) { *run = true; }, &run)))));
+
+ manager_.reset();
+
+ EXPECT_TRUE(run);
+}
+
+} // namespace sequence_manager_impl_unittest
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
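For context, the cancellation mechanism exercised by NoWakeUpsForCanceledDelayedTasks
and SweepCanceledDelayedTasks above is the usual base::WeakPtr pattern: a delayed task
bound to a weak pointer is silently skipped once the factory's weak pointers are
invalidated, and SweepCanceledDelayedTasks() can then drop the stale queue entries.
A minimal sketch of that pattern; the Uploader/DoUpload names are illustrative only
and do not appear in this patch:

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/threading/thread_task_runner_handle.h"
    #include "base/time/time.h"

    // Illustrative example class; not part of base/ or of this patch.
    class Uploader {
     public:
      Uploader() : weak_factory_(this) {}

      void ScheduleUpload() {
        // If Cancel() runs before the delay elapses, DoUpload() is never
        // invoked; the canceled task stays queued until it is swept or due.
        base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
            FROM_HERE,
            base::BindOnce(&Uploader::DoUpload, weak_factory_.GetWeakPtr()),
            base::TimeDelta::FromSeconds(5));
      }

      void Cancel() { weak_factory_.InvalidateWeakPtrs(); }

     private:
      void DoUpload() { /* do the work */ }

      base::WeakPtrFactory<Uploader> weak_factory_;
    };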
diff --git a/chromium/base/task/sequence_manager/sequence_manager_perftest.cc b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
new file mode 100644
index 00000000000..c5cd1a00f18
--- /dev/null
+++ b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/test/sequence_manager_for_test.h"
+#include "base/task/sequence_manager/test/test_task_queue.h"
+#include "base/task/sequence_manager/test/test_task_time_observer.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/default_tick_clock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+namespace sequence_manager {
+
+// To reduce noise related to the OS timer, we use a mock time domain to
+// fast-forward the timers.
+class PerfTestTimeDomain : public MockTimeDomain {
+ public:
+ PerfTestTimeDomain() : MockTimeDomain(TimeTicks::Now()) {}
+ ~PerfTestTimeDomain() override = default;
+
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override {
+ Optional<TimeTicks> run_time = NextScheduledRunTime();
+ if (!run_time)
+ return nullopt;
+ SetNowTicks(*run_time);
+ // Makes SequenceManager continue immediately.
+ return TimeDelta();
+ }
+
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override {
+ // De-dupe DoWorks.
+ if (NumberOfScheduledWakeUps() == 1u)
+ RequestDoWork();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PerfTestTimeDomain);
+};
+
+class SequenceManagerPerfTest : public testing::Test {
+ public:
+ SequenceManagerPerfTest()
+ : num_queues_(0),
+ max_tasks_in_flight_(0),
+ num_tasks_in_flight_(0),
+ num_tasks_to_post_(0),
+ num_tasks_to_run_(0) {}
+
+ void SetUp() override {
+ if (ThreadTicks::IsSupported())
+ ThreadTicks::WaitUntilInitialized();
+ }
+
+ void TearDown() override {
+ queues_.clear();
+ manager_->UnregisterTimeDomain(time_domain_.get());
+ manager_.reset();
+ }
+
+ void Initialize(size_t num_queues) {
+ num_queues_ = num_queues;
+ message_loop_.reset(new MessageLoop());
+ manager_ = SequenceManagerForTest::Create(message_loop_.get(),
+ message_loop_->task_runner(),
+ DefaultTickClock::GetInstance());
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+ time_domain_.reset(new PerfTestTimeDomain());
+ manager_->RegisterTimeDomain(time_domain_.get());
+
+ for (size_t i = 0; i < num_queues; i++) {
+ queues_.push_back(manager_->CreateTaskQueue<TestTaskQueue>(
+ TaskQueue::Spec("test").SetTimeDomain(time_domain_.get())));
+ }
+
+ delayed_task_closure_ = BindRepeating(
+ &SequenceManagerPerfTest::TestDelayedTask, Unretained(this));
+
+ immediate_task_closure_ = BindRepeating(
+ &SequenceManagerPerfTest::TestImmediateTask, Unretained(this));
+ }
+
+ void TestDelayedTask() {
+ if (--num_tasks_to_run_ == 0) {
+ run_loop_->QuitWhenIdle();
+ return;
+ }
+
+ num_tasks_in_flight_--;
+ // NOTE there are only up to max_tasks_in_flight_ pending delayed tasks at
+ // any one time. Because lower_num_tasks_to_post goes to zero when there are
+ // a lot of tasks in flight, the total number of tasks in flight at any one
+ // time is highly variable.
+ unsigned int lower_num_tasks_to_post =
+ num_tasks_in_flight_ < (max_tasks_in_flight_ / 2) ? 1 : 0;
+ unsigned int max_tasks_to_post =
+ num_tasks_to_post_ % 2 ? lower_num_tasks_to_post : 10;
+ for (unsigned int i = 0;
+ i < max_tasks_to_post && num_tasks_in_flight_ < max_tasks_in_flight_ &&
+ num_tasks_to_post_ > 0;
+ i++) {
+ // Choose a queue weighted towards queue 0.
+ unsigned int queue = num_tasks_to_post_ % (num_queues_ + 1);
+ if (queue == num_queues_) {
+ queue = 0;
+ }
+ // Simulate a mix of short and longer delays.
+ unsigned int delay =
+ num_tasks_to_post_ % 2 ? 1 : (10 + num_tasks_to_post_ % 10);
+ queues_[queue]->PostDelayedTask(FROM_HERE, delayed_task_closure_,
+ TimeDelta::FromMilliseconds(delay));
+ num_tasks_in_flight_++;
+ num_tasks_to_post_--;
+ }
+ }
+
+ void TestImmediateTask() {
+ if (--num_tasks_to_run_ == 0) {
+ run_loop_->QuitWhenIdle();
+ return;
+ }
+
+ num_tasks_in_flight_--;
+ // NOTE there are only up to max_tasks_in_flight_ pending immediate tasks at
+ // any one time. Because lower_num_tasks_to_post goes to zero when there are
+ // a lot of tasks in flight, the total number of tasks in flight at any one
+ // time is highly variable.
+ unsigned int lower_num_tasks_to_post =
+ num_tasks_in_flight_ < (max_tasks_in_flight_ / 2) ? 1 : 0;
+ unsigned int max_tasks_to_post =
+ num_tasks_to_post_ % 2 ? lower_num_tasks_to_post : 10;
+ for (unsigned int i = 0;
+ i < max_tasks_to_post && num_tasks_in_flight_ < max_tasks_in_flight_ &&
+ num_tasks_to_post_ > 0;
+ i++) {
+ // Choose a queue weighted towards queue 0.
+ unsigned int queue = num_tasks_to_post_ % (num_queues_ + 1);
+ if (queue == num_queues_) {
+ queue = 0;
+ }
+ queues_[queue]->PostTask(FROM_HERE, immediate_task_closure_);
+ num_tasks_in_flight_++;
+ num_tasks_to_post_--;
+ }
+ }
+
+ void ResetAndCallTestDelayedTask(unsigned int num_tasks_to_run) {
+ num_tasks_in_flight_ = 1;
+ num_tasks_to_post_ = num_tasks_to_run;
+ num_tasks_to_run_ = num_tasks_to_run;
+ TestDelayedTask();
+ }
+
+ void ResetAndCallTestImmediateTask(unsigned int num_tasks_to_run) {
+ num_tasks_in_flight_ = 1;
+ num_tasks_to_post_ = num_tasks_to_run;
+ num_tasks_to_run_ = num_tasks_to_run;
+ TestImmediateTask();
+ }
+
+ void Benchmark(const std::string& trace, const RepeatingClosure& test_task) {
+ ThreadTicks start = ThreadTicks::Now();
+ ThreadTicks now;
+ unsigned long long num_iterations = 0;
+ do {
+ test_task.Run();
+ run_loop_.reset(new RunLoop());
+ run_loop_->Run();
+ now = ThreadTicks::Now();
+ num_iterations++;
+ } while (now - start < TimeDelta::FromSeconds(5));
+ perf_test::PrintResult(
+ "task", "", trace,
+ (now - start).InMicroseconds() / static_cast<double>(num_iterations),
+ "us/run", true);
+ }
+
+ size_t num_queues_;
+ unsigned int max_tasks_in_flight_;
+ unsigned int num_tasks_in_flight_;
+ unsigned int num_tasks_to_post_;
+ unsigned int num_tasks_to_run_;
+ std::unique_ptr<MessageLoop> message_loop_;
+ std::unique_ptr<SequenceManager> manager_;
+ std::unique_ptr<RunLoop> run_loop_;
+ std::unique_ptr<TimeDomain> time_domain_;
+ std::vector<scoped_refptr<SingleThreadTaskRunner>> queues_;
+ RepeatingClosure delayed_task_closure_;
+ RepeatingClosure immediate_task_closure_;
+ // TODO(alexclarke): parameterize so we can measure with and without a
+ // TaskTimeObserver.
+ TestTaskTimeObserver test_task_time_observer_;
+};
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_OneQueue) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(1u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark("run 10000 delayed tasks with one queue",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_FourQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(4u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark("run 10000 delayed tasks with four queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_EightQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(8u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark("run 10000 delayed tasks with eight queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_ThirtyTwoQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(32u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark("run 10000 delayed tasks with thirty two queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_OneQueue) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(1u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark(
+ "run 10000 immediate tasks with one queue",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_FourQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(4u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark(
+ "run 10000 immediate tasks with four queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_EightQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(8u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark(
+ "run 10000 immediate tasks with eight queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+ Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_ThirtyTwoQueues) {
+ if (!ThreadTicks::IsSupported())
+ return;
+ Initialize(32u);
+
+ max_tasks_in_flight_ = 200;
+ Benchmark(
+ "run 10000 immediate tasks with thirty two queues",
+ BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+ Unretained(this), 10000));
+}
+
+// TODO(alexclarke): Add additional tests with different mixes of non-delayed vs
+// delayed tasks.
+
+} // namespace sequence_manager
+} // namespace base
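The Benchmark() helper above amortizes timer noise by re-running the task under test
until a fixed time budget has elapsed and then reporting the mean cost per iteration.
A standalone sketch of that measurement loop, using std::chrono wall time instead of
base::ThreadTicks purely for illustration:

    #include <chrono>
    #include <cstdio>
    #include <functional>

    // Illustrative sketch, not part of this patch. Runs |work| repeatedly for
    // at least |budget| and prints the average cost of one iteration in
    // microseconds, mirroring the loop in Benchmark().
    void BenchmarkSketch(const char* trace, const std::function<void()>& work,
                         std::chrono::seconds budget = std::chrono::seconds(5)) {
      using Clock = std::chrono::steady_clock;
      const Clock::time_point start = Clock::now();
      Clock::time_point now;
      unsigned long long num_iterations = 0;
      do {
        work();
        now = Clock::now();
        num_iterations++;
      } while (now - start < budget);
      const double total_us =
          std::chrono::duration_cast<std::chrono::microseconds>(now - start)
              .count();
      std::printf("%s: %.3f us/run\n", trace,
                  total_us / static_cast<double>(num_iterations));
    }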
diff --git a/chromium/base/task/sequence_manager/task_queue.cc b/chromium/base/task/sequence_manager/task_queue.cc
new file mode 100644
index 00000000000..2d3d1525d65
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue.cc
@@ -0,0 +1,289 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue.h"
+
+#include "base/bind.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec)
+ : impl_(std::move(impl)),
+ thread_id_(PlatformThread::CurrentId()),
+ sequence_manager_(impl_ ? impl_->GetSequenceManagerWeakPtr() : nullptr),
+ graceful_queue_shutdown_helper_(
+ impl_ ? impl_->GetGracefulQueueShutdownHelper() : nullptr) {}
+
+TaskQueue::~TaskQueue() {
+ // scoped_refptr guarantees that nobody else is using this object anymore.
+ if (!impl_)
+ return;
+ if (impl_->IsUnregistered())
+ return;
+ graceful_queue_shutdown_helper_->GracefullyShutdownTaskQueue(
+ TakeTaskQueueImpl());
+}
+
+TaskQueue::Task::Task(TaskQueue::PostedTask task, TimeTicks desired_run_time)
+ : PendingTask(task.posted_from,
+ std::move(task.callback),
+ desired_run_time,
+ task.nestable),
+ task_type_(task.task_type) {}
+
+TaskQueue::TaskTiming::TaskTiming(bool has_wall_time, bool has_thread_time)
+ : has_wall_time_(has_wall_time), has_thread_time_(has_thread_time) {}
+
+void TaskQueue::TaskTiming::RecordTaskStart(LazyNow* now) {
+ if (has_wall_time())
+ start_time_ = now->Now();
+ if (has_thread_time())
+ start_thread_time_ = base::ThreadTicks::Now();
+}
+
+void TaskQueue::TaskTiming::RecordTaskEnd(LazyNow* now) {
+ if (has_wall_time())
+ end_time_ = now->Now();
+ if (has_thread_time())
+ end_thread_time_ = base::ThreadTicks::Now();
+}
+
+TaskQueue::PostedTask::PostedTask(OnceClosure callback,
+ Location posted_from,
+ TimeDelta delay,
+ Nestable nestable,
+ int task_type)
+ : callback(std::move(callback)),
+ posted_from(posted_from),
+ delay(delay),
+ nestable(nestable),
+ task_type(task_type) {}
+
+TaskQueue::PostedTask::PostedTask(PostedTask&& move_from)
+ : callback(std::move(move_from.callback)),
+ posted_from(move_from.posted_from),
+ delay(move_from.delay),
+ nestable(move_from.nestable),
+ task_type(move_from.task_type) {}
+
+TaskQueue::PostedTask::~PostedTask() = default;
+
+void TaskQueue::ShutdownTaskQueue() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ AutoLock lock(impl_lock_);
+ if (!impl_)
+ return;
+ if (!sequence_manager_) {
+ impl_.reset();
+ return;
+ }
+ impl_->SetBlameContext(nullptr);
+ impl_->SetOnTaskStartedHandler(
+ internal::TaskQueueImpl::OnTaskStartedHandler());
+ impl_->SetOnTaskCompletedHandler(
+ internal::TaskQueueImpl::OnTaskCompletedHandler());
+ sequence_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
+}
+
+bool TaskQueue::RunsTasksInCurrentSequence() const {
+ return IsOnMainThread();
+}
+
+bool TaskQueue::PostDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
+ return PostTaskWithMetadata(
+ PostedTask(std::move(task), from_here, delay, Nestable::kNestable));
+}
+
+bool TaskQueue::PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
+ return PostTaskWithMetadata(
+ PostedTask(std::move(task), from_here, delay, Nestable::kNonNestable));
+}
+
+bool TaskQueue::PostTaskWithMetadata(PostedTask task) {
+ Optional<MoveableAutoLock> lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return false;
+ internal::TaskQueueImpl::PostTaskResult result(
+ impl_->PostDelayedTask(std::move(task)));
+ if (result.success)
+ return true;
+ // If posting the task was unsuccessful then |result| will contain
+ // the original task, which should be destructed outside of the lock.
+ lock = nullopt;
+ // Task gets implicitly destructed here.
+ return false;
+}
+
+std::unique_ptr<TaskQueue::QueueEnabledVoter>
+TaskQueue::CreateQueueEnabledVoter() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullptr;
+ return impl_->CreateQueueEnabledVoter(this);
+}
+
+bool TaskQueue::IsQueueEnabled() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->IsQueueEnabled();
+}
+
+bool TaskQueue::IsEmpty() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return true;
+ return impl_->IsEmpty();
+}
+
+size_t TaskQueue::GetNumberOfPendingTasks() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return 0;
+ return impl_->GetNumberOfPendingTasks();
+}
+
+bool TaskQueue::HasTaskToRunImmediately() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->HasTaskToRunImmediately();
+}
+
+Optional<TimeTicks> TaskQueue::GetNextScheduledWakeUp() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullopt;
+ return impl_->GetNextScheduledWakeUp();
+}
+
+void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetQueuePriority(priority);
+}
+
+TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return TaskQueue::QueuePriority::kLowPriority;
+ return impl_->GetQueuePriority();
+}
+
+void TaskQueue::AddTaskObserver(MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->AddTaskObserver(task_observer);
+}
+
+void TaskQueue::RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->RemoveTaskObserver(task_observer);
+}
+
+void TaskQueue::SetTimeDomain(TimeDomain* time_domain) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetTimeDomain(time_domain);
+}
+
+TimeDomain* TaskQueue::GetTimeDomain() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullptr;
+ return impl_->GetTimeDomain();
+}
+
+void TaskQueue::SetBlameContext(trace_event::BlameContext* blame_context) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetBlameContext(blame_context);
+}
+
+void TaskQueue::InsertFence(InsertFencePosition position) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->InsertFence(position);
+}
+
+void TaskQueue::InsertFenceAt(TimeTicks time) {
+ impl_->InsertFenceAt(time);
+}
+
+void TaskQueue::RemoveFence() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->RemoveFence();
+}
+
+bool TaskQueue::HasActiveFence() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->HasActiveFence();
+}
+
+bool TaskQueue::BlockedByFence() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->BlockedByFence();
+}
+
+const char* TaskQueue::GetName() const {
+ auto lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return "";
+ return impl_->GetName();
+}
+
+void TaskQueue::SetObserver(Observer* observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ if (observer) {
+ // Observer is guaranteed to outlive TaskQueue, and TaskQueueImpl's lifecycle
+ // is controlled by |this|.
+ impl_->SetOnNextWakeUpChangedCallback(
+ BindRepeating(&TaskQueue::Observer::OnQueueNextWakeUpChanged,
+ Unretained(observer), Unretained(this)));
+ } else {
+ impl_->SetOnNextWakeUpChangedCallback(RepeatingCallback<void(TimeTicks)>());
+ }
+}
+
+bool TaskQueue::IsOnMainThread() const {
+ return thread_id_ == PlatformThread::CurrentId();
+}
+
+Optional<MoveableAutoLock> TaskQueue::AcquireImplReadLockIfNeeded() const {
+ if (IsOnMainThread())
+ return nullopt;
+ return MoveableAutoLock(impl_lock_);
+}
+
+std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
+ DCHECK(impl_);
+ return std::move(impl_);
+}
+
+} // namespace sequence_manager
+} // namespace base
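PostTaskWithMetadata() above takes care to release |impl_lock_| before a rejected
task is destroyed, because an arbitrary closure destructor may itself post to the
queue and must not run under the lock. A generic sketch of that "destroy the payload
outside the critical section" shape, using std::mutex and std::optional stand-ins
rather than the base/ locking types:

    #include <functional>
    #include <mutex>
    #include <optional>
    #include <utility>
    #include <vector>

    // Illustrative example class; not part of base/ or of this patch.
    class SimpleQueue {
     public:
      bool Post(std::function<void()> task) {
        std::optional<std::function<void()>> rejected;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          if (accepting_) {
            pending_.push_back(std::move(task));
            return true;
          }
          // Keep the rejected task alive past the critical section; its
          // destructor could call Post() again and must not deadlock on
          // |mutex_|.
          rejected = std::move(task);
        }
        rejected.reset();  // The task is destroyed here, outside the lock.
        return false;
      }

     private:
      std::mutex mutex_;
      bool accepting_ = true;
      std::vector<std::function<void()>> pending_;
    };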
diff --git a/chromium/base/task/sequence_manager/task_queue.h b/chromium/base/task/sequence_manager/task_queue.h
new file mode 100644
index 00000000000..af6b4dd5da5
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue.h
@@ -0,0 +1,368 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/optional.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/moveable_auto_lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace trace_event {
+class BlameContext;
+}
+
+namespace sequence_manager {
+
+namespace internal {
+class GracefulQueueShutdownHelper;
+class SequenceManagerImpl;
+class TaskQueueImpl;
+} // namespace internal
+
+class TimeDomain;
+
+class BASE_EXPORT TaskQueue : public SingleThreadTaskRunner {
+ public:
+ class Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Notify observer that the time at which this queue wants to run
+ // the next task has changed. |next_wake_up| can be in the past
+ // (e.g. TimeTicks() can be used to notify about immediate work).
+ // Can be called on any thread.
+ // All methods but SetObserver, SetTimeDomain and GetTimeDomain can be
+ // called on |queue|.
+ //
+ // TODO(altimin): Make it Optional<TimeTicks> to tell
+ // observer about cancellations.
+ virtual void OnQueueNextWakeUpChanged(TaskQueue* queue,
+ TimeTicks next_wake_up) = 0;
+ };
+
+ // A wrapper around OnceClosure with additional metadata to be passed
+ // to PostTask and plumbed until PendingTask is created.
+ struct BASE_EXPORT PostedTask {
+ PostedTask(OnceClosure callback,
+ Location posted_from,
+ TimeDelta delay = TimeDelta(),
+ Nestable nestable = Nestable::kNestable,
+ int task_type = 0);
+ PostedTask(PostedTask&& move_from);
+ PostedTask(const PostedTask& copy_from) = delete;
+ ~PostedTask();
+
+ OnceClosure callback;
+ Location posted_from;
+ TimeDelta delay;
+ Nestable nestable;
+ int task_type;
+ };
+
+ // Prepares the task queue to be released.
+ // All tasks posted after this call will be discarded.
+ virtual void ShutdownTaskQueue();
+
+ // TODO(scheduler-dev): Could we define a more clear list of priorities?
+ // See https://crbug.com/847858.
+ enum QueuePriority {
+ // Queues with control priority will run before any other queue, and will
+ // explicitly starve other queues. Typically this should only be used for
+ // private queues which perform control operations.
+ kControlPriority,
+
+ // The selector will prioritize highest over high, normal and low; and
+ // high over normal and low; and normal over low. However, it will ensure
+ // that none of the lower priority queues can be completely starved by
+ // higher priority tasks. All three of these queues will always take
+ // priority over and can starve the best effort queue.
+ kHighestPriority,
+
+ kHighPriority,
+
+ // Queues with normal priority are the default.
+ kNormalPriority,
+ kLowPriority,
+
+ // Queues with best effort priority will only be run if all other queues are
+ // empty. They can be starved by the other queues.
+ kBestEffortPriority,
+ // Must be the last entry.
+ kQueuePriorityCount,
+ kFirstQueuePriority = kControlPriority,
+ };
+
+ // Can be called on any thread.
+ static const char* PriorityToString(QueuePriority priority);
+
+ // Options for constructing a TaskQueue.
+ struct Spec {
+ explicit Spec(const char* name)
+ : name(name),
+ should_monitor_quiescence(false),
+ time_domain(nullptr),
+ should_notify_observers(true) {}
+
+ Spec SetShouldMonitorQuiescence(bool should_monitor) {
+ should_monitor_quiescence = should_monitor;
+ return *this;
+ }
+
+ Spec SetShouldNotifyObservers(bool run_observers) {
+ should_notify_observers = run_observers;
+ return *this;
+ }
+
+ Spec SetTimeDomain(TimeDomain* domain) {
+ time_domain = domain;
+ return *this;
+ }
+
+ const char* name;
+ bool should_monitor_quiescence;
+ TimeDomain* time_domain;
+ bool should_notify_observers;
+ };
+
+ // Interface to pass per-task metadata to RendererScheduler.
+ class BASE_EXPORT Task : public PendingTask {
+ public:
+ Task(PostedTask posted_task, TimeTicks desired_run_time);
+
+ int task_type() const { return task_type_; }
+
+ private:
+ int task_type_;
+ };
+
+ // Information about task execution.
+ //
+ // Wall-time related methods (start_time, end_time, wall_duration) can be
+ // called only when |has_wall_time()| is true.
+ // Thread-time related methods (start_thread_time, end_thread_time,
+ // thread_duration) can be called only when |has_thread_time()| is true.
+ //
+ // start_* should be called after RecordTaskStart.
+ // end_* and *_duration should be called after RecordTaskEnd.
+ class BASE_EXPORT TaskTiming {
+ public:
+ TaskTiming(bool has_wall_time, bool has_thread_time);
+
+ bool has_wall_time() const { return has_wall_time_; }
+ bool has_thread_time() const { return has_thread_time_; }
+
+ base::TimeTicks start_time() const {
+ DCHECK(has_wall_time());
+ return start_time_;
+ }
+ base::TimeTicks end_time() const {
+ DCHECK(has_wall_time());
+ return end_time_;
+ }
+ base::TimeDelta wall_duration() const {
+ DCHECK(has_wall_time());
+ return end_time_ - start_time_;
+ }
+ base::ThreadTicks start_thread_time() const {
+ DCHECK(has_thread_time());
+ return start_thread_time_;
+ }
+ base::ThreadTicks end_thread_time() const {
+ DCHECK(has_thread_time());
+ return end_thread_time_;
+ }
+ base::TimeDelta thread_duration() const {
+ DCHECK(has_thread_time());
+ return end_thread_time_ - start_thread_time_;
+ }
+
+ void RecordTaskStart(LazyNow* now);
+ void RecordTaskEnd(LazyNow* now);
+
+ // Protected for tests.
+ protected:
+ bool has_wall_time_;
+ bool has_thread_time_;
+
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
+ base::ThreadTicks start_thread_time_;
+ base::ThreadTicks end_thread_time_;
+ };
+
+ // An interface that lets the owner vote on whether or not the associated
+ // TaskQueue should be enabled.
+ class QueueEnabledVoter {
+ public:
+ QueueEnabledVoter() = default;
+ virtual ~QueueEnabledVoter() = default;
+
+ // Votes to enable or disable the associated TaskQueue. The TaskQueue will
+ // only be enabled if all the voters agree it should be enabled, or if there
+ // are no voters.
+ // NOTE this must be called on the thread the associated TaskQueue was
+ // created on.
+ virtual void SetQueueEnabled(bool enabled) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(QueueEnabledVoter);
+ };
+
+ // Returns an interface that allows the caller to vote on whether or not this
+ // TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
+ // or if all agree it should be enabled.
+ // NOTE this must be called on the thread this TaskQueue was created by.
+ std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();
+
+ // NOTE this must be called on the thread this TaskQueue was created by.
+ bool IsQueueEnabled() const;
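+  // Illustrative usage of the voter API above (hypothetical caller):
+  //   std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+  //       task_queue->CreateQueueEnabledVoter();
+  //   voter->SetQueueEnabled(false);  // Queue is disabled.
+  //   voter->SetQueueEnabled(true);   // Re-enabled, unless another voter
+  //                                   // still votes to disable the queue.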
+
+ // Returns true if the queue is completely empty.
+ bool IsEmpty() const;
+
+ // Returns the number of pending tasks in the queue.
+ size_t GetNumberOfPendingTasks() const;
+
+ // Returns true if the queue has work that's ready to execute now.
+ // NOTE: this must be called on the thread this TaskQueue was created by.
+ bool HasTaskToRunImmediately() const;
+
+  // Returns the requested run time of the next scheduled wake-up for a
+  // delayed task which is not ready to run. If there are no such tasks
+  // (immediate tasks don't count) or the queue is disabled, it returns
+  // nullopt.
+ // NOTE: this must be called on the thread this TaskQueue was created by.
+ Optional<TimeTicks> GetNextScheduledWakeUp();
+
+ // Can be called on any thread.
+ virtual const char* GetName() const;
+
+ // Set the priority of the queue to |priority|. NOTE this must be called on
+ // the thread this TaskQueue was created by.
+ void SetQueuePriority(QueuePriority priority);
+
+ // Returns the current queue priority.
+ QueuePriority GetQueuePriority() const;
+
+ // These functions can only be called on the same thread that the task queue
+ // manager executes its tasks on.
+ void AddTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer);
+
+ // Set the blame context which is entered and left while executing tasks from
+ // this task queue. |blame_context| must be null or outlive this task queue.
+ // Must be called on the thread this TaskQueue was created by.
+ void SetBlameContext(trace_event::BlameContext* blame_context);
+
+ // Removes the task queue from the previous TimeDomain and adds it to
+ // |domain|. This is a moderately expensive operation.
+ void SetTimeDomain(TimeDomain* domain);
+
+ // Returns the queue's current TimeDomain. Can be called from any thread.
+ TimeDomain* GetTimeDomain() const;
+
+ enum class InsertFencePosition {
+    kNow,  // Tasks posted on the queue up to this point may run.
+ // All further tasks are blocked.
+ kBeginningOfTime, // No tasks posted on this queue may run.
+ };
+
+ // Inserts a barrier into the task queue which prevents tasks with an enqueue
+ // order greater than the fence from running until either the fence has been
+ // removed or a subsequent fence has unblocked some tasks within the queue.
+ // Note: delayed tasks get their enqueue order set once their delay has
+ // expired, and non-delayed tasks get their enqueue order set when posted.
+ //
+  // Fences come in three flavors:
+  // - Regular (InsertFence(kNow)) - all tasks posted after this moment
+  //   are blocked.
+  // - Fully blocking (InsertFence(kBeginningOfTime)) - all tasks, including
+  //   those already posted, are blocked.
+  // - Delayed (InsertFenceAt(timestamp)) - blocks all tasks posted after the
+  //   given point in time (which must be in the future).
+ //
+ // Only one fence can be scheduled at a time. Inserting a new fence
+ // will automatically remove the previous one, regardless of fence type.
+ void InsertFence(InsertFencePosition position);
+ void InsertFenceAt(TimeTicks time);
+
+ // Removes any previously added fence and unblocks execution of any tasks
+ // blocked by it.
+ void RemoveFence();
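+  // Illustrative usage of the fence API above (hypothetical caller;
+  // DoSomething is an assumed free function):
+  //   queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  //   queue->PostTask(FROM_HERE, BindOnce(&DoSomething));  // Blocked.
+  //   queue->RemoveFence();  // The blocked task becomes runnable again.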
+
+  // Returns true if the queue has a fence, even if that fence isn't currently
+  // blocking execution of tasks (which can happen if the tasks' enqueue order
+  // hasn't yet reached the value set for the fence).
+ bool HasActiveFence();
+
+ // Returns true if the queue has a fence which is blocking execution of tasks.
+ bool BlockedByFence() const;
+
+ void SetObserver(Observer* observer);
+
+ // SingleThreadTaskRunner implementation
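+  // Illustrative usage (hypothetical caller): because TaskQueue implements
+  // SingleThreadTaskRunner, tasks can be posted to it directly, e.g.
+  //   task_queue->PostTask(FROM_HERE, BindOnce(&MyClass::DoWork, weak_ptr));
+  // where MyClass, DoWork and |weak_ptr| are assumed caller-side names.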
+ bool RunsTasksInCurrentSequence() const override;
+ bool PostDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) override;
+
+ bool PostTaskWithMetadata(PostedTask task);
+
+ protected:
+ TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec);
+ ~TaskQueue() override;
+
+ internal::TaskQueueImpl* GetTaskQueueImpl() const { return impl_.get(); }
+
+ private:
+ friend class internal::SequenceManagerImpl;
+ friend class internal::TaskQueueImpl;
+
+ bool IsOnMainThread() const;
+
+ Optional<MoveableAutoLock> AcquireImplReadLockIfNeeded() const;
+
+  // TaskQueue has ownership of an underlying implementation, but in certain
+  // cases (e.g. detached frames) their lifetimes may diverge.
+  // This method should be used to take the impl away for graceful shutdown.
+  // TaskQueue will disregard any subsequent calls and posted tasks.
+ std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();
+
+ // |impl_| can be written to on the main thread but can be read from
+ // any thread.
+  // |impl_lock_| must be acquired when writing to |impl_| or when accessing
+  // it from a non-main thread. Reading from the main thread does not require
+  // a lock.
+ mutable Lock impl_lock_;
+ std::unique_ptr<internal::TaskQueueImpl> impl_;
+
+ const PlatformThreadId thread_id_;
+
+ const WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
+
+ const scoped_refptr<internal::GracefulQueueShutdownHelper>
+ graceful_queue_shutdown_helper_;
+
+ THREAD_CHECKER(main_thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.cc b/chromium/base/task/sequence_manager/task_queue_impl.cc
new file mode 100644
index 00000000000..250e8c438c3
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_impl.cc
@@ -0,0 +1,1016 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_impl.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/strings/stringprintf.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/time_domain.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/time/time.h"
+#include "base/trace_event/blame_context.h"
+
+namespace base {
+namespace sequence_manager {
+
+// static
+const char* TaskQueue::PriorityToString(TaskQueue::QueuePriority priority) {
+ switch (priority) {
+ case kControlPriority:
+ return "control";
+ case kHighestPriority:
+ return "highest";
+ case kHighPriority:
+ return "high";
+ case kNormalPriority:
+ return "normal";
+ case kLowPriority:
+ return "low";
+ case kBestEffortPriority:
+ return "best_effort";
+ default:
+ NOTREACHED();
+ return nullptr;
+ }
+}
+
+namespace internal {
+
+TaskQueueImpl::TaskQueueImpl(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec)
+ : name_(spec.name),
+ thread_id_(PlatformThread::CurrentId()),
+ any_thread_(sequence_manager, time_domain),
+ main_thread_only_(sequence_manager, this, time_domain),
+ should_monitor_quiescence_(spec.should_monitor_quiescence),
+ should_notify_observers_(spec.should_notify_observers) {
+ DCHECK(time_domain);
+}
+
+TaskQueueImpl::~TaskQueueImpl() {
+#if DCHECK_IS_ON()
+ AutoLock lock(any_thread_lock_);
+ // NOTE this check shouldn't fire because |SequenceManagerImpl::queues_|
+ // contains a strong reference to this TaskQueueImpl and the
+ // SequenceManagerImpl destructor calls UnregisterTaskQueue on all task
+ // queues.
+ DCHECK(!any_thread().sequence_manager)
+ << "UnregisterTaskQueue must be called first!";
+#endif
+}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult()
+ : success(false), task(OnceClosure(), Location()) {}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult(bool success,
+ TaskQueue::PostedTask task)
+ : success(success), task(std::move(task)) {}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult(PostTaskResult&& move_from)
+ : success(move_from.success), task(std::move(move_from.task)) {}
+
+TaskQueueImpl::PostTaskResult::~PostTaskResult() = default;
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Success() {
+ return PostTaskResult(true, TaskQueue::PostedTask(OnceClosure(), Location()));
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Fail(
+ TaskQueue::PostedTask task) {
+ return PostTaskResult(false, std::move(task));
+}
+
+TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number)
+ : TaskQueue::Task(std::move(task), desired_run_time) {
+ // It might wrap around to a negative number but it's handled properly.
+ sequence_num = static_cast<int>(sequence_number);
+}
+
+TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number,
+ EnqueueOrder enqueue_order)
+ : TaskQueue::Task(std::move(task), desired_run_time),
+ enqueue_order_(enqueue_order) {
+ // It might wrap around to a negative number but it's handled properly.
+ sequence_num = static_cast<int>(sequence_number);
+}
+
+TaskQueueImpl::AnyThread::AnyThread(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain)
+ : sequence_manager(sequence_manager), time_domain(time_domain) {}
+
+TaskQueueImpl::AnyThread::~AnyThread() = default;
+
+TaskQueueImpl::MainThreadOnly::MainThreadOnly(
+ SequenceManagerImpl* sequence_manager,
+ TaskQueueImpl* task_queue,
+ TimeDomain* time_domain)
+ : sequence_manager(sequence_manager),
+ time_domain(time_domain),
+ delayed_work_queue(
+ new WorkQueue(task_queue, "delayed", WorkQueue::QueueType::kDelayed)),
+ immediate_work_queue(new WorkQueue(task_queue,
+ "immediate",
+ WorkQueue::QueueType::kImmediate)),
+ set_index(0),
+ is_enabled_refcount(0),
+ voter_refcount(0),
+ blame_context(nullptr),
+ is_enabled_for_test(true) {}
+
+TaskQueueImpl::MainThreadOnly::~MainThreadOnly() = default;
+
+void TaskQueueImpl::UnregisterTaskQueue() {
+ TaskDeque immediate_incoming_queue;
+
+ {
+ AutoLock lock(any_thread_lock_);
+ AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+
+ if (main_thread_only().time_domain)
+ main_thread_only().time_domain->UnregisterQueue(this);
+
+ if (!any_thread().sequence_manager)
+ return;
+
+ main_thread_only().on_task_completed_handler = OnTaskCompletedHandler();
+ any_thread().time_domain = nullptr;
+ main_thread_only().time_domain = nullptr;
+
+ any_thread().sequence_manager = nullptr;
+ main_thread_only().sequence_manager = nullptr;
+ any_thread().on_next_wake_up_changed_callback =
+ OnNextWakeUpChangedCallback();
+ main_thread_only().on_next_wake_up_changed_callback =
+ OnNextWakeUpChangedCallback();
+ immediate_incoming_queue.swap(immediate_incoming_queue_);
+ }
+
+  // It is possible for a task to hold a scoped_refptr to this, which
+  // can lead to the TaskQueueImpl destructor being called when deleting a
+  // task. To avoid use-after-free, we need to clear all fields of a task
+  // queue before starting to delete the tasks.
+  // All work queues and priority queues containing tasks should be moved to
+  // local variables on the stack (std::move for unique_ptrs and swap for
+  // queues) before clearing them and deleting tasks.
+
+ // Flush the queues outside of the lock because TSAN complains about a lock
+ // order inversion for tasks that are posted from within a lock, with a
+ // destructor that acquires the same lock.
+
+ std::priority_queue<Task> delayed_incoming_queue;
+ delayed_incoming_queue.swap(main_thread_only().delayed_incoming_queue);
+
+ std::unique_ptr<WorkQueue> immediate_work_queue =
+ std::move(main_thread_only().immediate_work_queue);
+ std::unique_ptr<WorkQueue> delayed_work_queue =
+ std::move(main_thread_only().delayed_work_queue);
+}
+
+const char* TaskQueueImpl::GetName() const {
+ return name_;
+}
+
+bool TaskQueueImpl::RunsTasksInCurrentSequence() const {
+ return PlatformThread::CurrentId() == thread_id_;
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTask(
+ TaskQueue::PostedTask task) {
+ if (task.delay.is_zero())
+ return PostImmediateTaskImpl(std::move(task));
+
+ return PostDelayedTaskImpl(std::move(task));
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostImmediateTaskImpl(
+ TaskQueue::PostedTask task) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.callback);
+ AutoLock lock(any_thread_lock_);
+ if (!any_thread().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+
+ PushOntoImmediateIncomingQueueLocked(Task(std::move(task),
+ any_thread().time_domain->Now(),
+ sequence_number, sequence_number));
+ return PostTaskResult::Success();
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTaskImpl(
+ TaskQueue::PostedTask task) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.callback);
+ DCHECK_GT(task.delay, TimeDelta());
+ if (PlatformThread::CurrentId() == thread_id_) {
+ // Lock-free fast path for delayed tasks posted from the main thread.
+ if (!main_thread_only().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ main_thread_only().sequence_manager->GetNextSequenceNumber();
+
+ TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+ TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueFromMainThread(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number),
+ time_domain_now);
+ } else {
+ // NOTE posting a delayed task from a different thread is not expected to
+ // be common. This pathway is less optimal than perhaps it could be
+    // because it causes two main thread tasks to be run. Should this
+    // assumption prove to be false in the future, we may need to revisit this.
+ AutoLock lock(any_thread_lock_);
+ if (!any_thread().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+
+ TimeTicks time_domain_now = any_thread().time_domain->Now();
+ TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueLocked(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number));
+ }
+ return PostTaskResult::Success();
+}
+
+void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
+ Task pending_task,
+ TimeTicks now) {
+ main_thread_only().sequence_manager->WillQueueTask(&pending_task);
+ main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+
+ LazyNow lazy_now(now);
+ UpdateDelayedWakeUp(&lazy_now);
+
+ TraceQueueSize();
+}
+
+void TaskQueueImpl::PushOntoDelayedIncomingQueueLocked(Task pending_task) {
+ any_thread().sequence_manager->WillQueueTask(&pending_task);
+
+ EnqueueOrder thread_hop_task_sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+ // TODO(altimin): Add a copy method to Task to capture metadata here.
+ PushOntoImmediateIncomingQueueLocked(Task(
+ TaskQueue::PostedTask(BindOnce(&TaskQueueImpl::ScheduleDelayedWorkTask,
+ Unretained(this), std::move(pending_task)),
+ FROM_HERE, TimeDelta(), Nestable::kNonNestable,
+ pending_task.task_type()),
+ TimeTicks(), thread_hop_task_sequence_number,
+ thread_hop_task_sequence_number));
+}
+
+void TaskQueueImpl::ScheduleDelayedWorkTask(Task pending_task) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ TimeTicks delayed_run_time = pending_task.delayed_run_time;
+ TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+ if (delayed_run_time <= time_domain_now) {
+ // If |delayed_run_time| is in the past then push it onto the work queue
+ // immediately. To ensure the right task ordering we need to temporarily
+ // push it onto the |delayed_incoming_queue|.
+ delayed_run_time = time_domain_now;
+ pending_task.delayed_run_time = time_domain_now;
+ main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+ LazyNow lazy_now(time_domain_now);
+ WakeUpForDelayedWork(&lazy_now);
+ } else {
+ // If |delayed_run_time| is in the future we can queue it as normal.
+ PushOntoDelayedIncomingQueueFromMainThread(std::move(pending_task),
+ time_domain_now);
+ }
+ TraceQueueSize();
+}
+
+void TaskQueueImpl::PushOntoImmediateIncomingQueueLocked(Task task) {
+ // If the |immediate_incoming_queue| is empty we need a DoWork posted to make
+ // it run.
+ bool was_immediate_incoming_queue_empty;
+
+ EnqueueOrder sequence_number = task.enqueue_order();
+ TimeTicks desired_run_time = task.delayed_run_time;
+
+ {
+ AutoLock lock(immediate_incoming_queue_lock_);
+ was_immediate_incoming_queue_empty = immediate_incoming_queue().empty();
+ any_thread().sequence_manager->WillQueueTask(&task);
+ immediate_incoming_queue().push_back(std::move(task));
+ }
+
+ if (was_immediate_incoming_queue_empty) {
+    // However, there's no point posting a DoWork for a blocked queue. Note
+    // that we can only tell whether the queue is disabled from the main
+    // thread.
+ bool queue_is_blocked =
+ RunsTasksInCurrentSequence() &&
+ (!IsQueueEnabled() || main_thread_only().current_fence);
+ any_thread().sequence_manager->OnQueueHasIncomingImmediateWork(
+ this, sequence_number, queue_is_blocked);
+ if (!any_thread().on_next_wake_up_changed_callback.is_null())
+ any_thread().on_next_wake_up_changed_callback.Run(desired_run_time);
+ }
+
+ TraceQueueSize();
+}
+
+void TaskQueueImpl::ReloadImmediateWorkQueueIfEmpty() {
+ if (!main_thread_only().immediate_work_queue->Empty())
+ return;
+
+ main_thread_only().immediate_work_queue->ReloadEmptyImmediateQueue();
+}
+
+void TaskQueueImpl::ReloadEmptyImmediateQueue(TaskDeque* queue) {
+ DCHECK(queue->empty());
+
+ AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+ queue->swap(immediate_incoming_queue());
+
+  // Activate the delayed fence if necessary. This is conceptually similar to
+  // ActivateDelayedFenceIfNeeded, but because immediate tasks can be posted
+  // from any thread we can't generate an enqueue order for the fence there,
+  // so we have to check all immediate tasks and use their enqueue order for
+  // the fence.
+ if (main_thread_only().delayed_fence) {
+ for (const Task& task : *queue) {
+ if (task.delayed_run_time >= main_thread_only().delayed_fence.value()) {
+ main_thread_only().delayed_fence = nullopt;
+ DCHECK(!main_thread_only().current_fence);
+ main_thread_only().current_fence = task.enqueue_order();
+ // Do not trigger WorkQueueSets notification when taking incoming
+ // immediate queue.
+ main_thread_only().immediate_work_queue->InsertFenceSilently(
+ main_thread_only().current_fence);
+ main_thread_only().delayed_work_queue->InsertFenceSilently(
+ main_thread_only().current_fence);
+ break;
+ }
+ }
+ }
+}
+
+bool TaskQueueImpl::IsEmpty() const {
+ if (!main_thread_only().delayed_work_queue->Empty() ||
+ !main_thread_only().delayed_incoming_queue.empty() ||
+ !main_thread_only().immediate_work_queue->Empty()) {
+ return false;
+ }
+
+ AutoLock lock(immediate_incoming_queue_lock_);
+ return immediate_incoming_queue().empty();
+}
+
+size_t TaskQueueImpl::GetNumberOfPendingTasks() const {
+ size_t task_count = 0;
+ task_count += main_thread_only().delayed_work_queue->Size();
+ task_count += main_thread_only().delayed_incoming_queue.size();
+ task_count += main_thread_only().immediate_work_queue->Size();
+
+ AutoLock lock(immediate_incoming_queue_lock_);
+ task_count += immediate_incoming_queue().size();
+ return task_count;
+}
+
+bool TaskQueueImpl::HasTaskToRunImmediately() const {
+ // Any work queue tasks count as immediate work.
+ if (!main_thread_only().delayed_work_queue->Empty() ||
+ !main_thread_only().immediate_work_queue->Empty()) {
+ return true;
+ }
+
+  // Tasks on |delayed_incoming_queue| that could run now count as
+ // immediate work.
+ if (!main_thread_only().delayed_incoming_queue.empty() &&
+ main_thread_only().delayed_incoming_queue.top().delayed_run_time <=
+ main_thread_only().time_domain->CreateLazyNow().Now()) {
+ return true;
+ }
+
+ // Finally tasks on |immediate_incoming_queue| count as immediate work.
+ AutoLock lock(immediate_incoming_queue_lock_);
+ return !immediate_incoming_queue().empty();
+}
+
+Optional<TaskQueueImpl::DelayedWakeUp>
+TaskQueueImpl::GetNextScheduledWakeUpImpl() {
+  // Note we don't schedule a wake-up for disabled queues.
+ if (main_thread_only().delayed_incoming_queue.empty() || !IsQueueEnabled())
+ return nullopt;
+
+ return main_thread_only().delayed_incoming_queue.top().delayed_wake_up();
+}
+
+Optional<TimeTicks> TaskQueueImpl::GetNextScheduledWakeUp() {
+ Optional<DelayedWakeUp> wake_up = GetNextScheduledWakeUpImpl();
+ if (!wake_up)
+ return nullopt;
+ return wake_up->time;
+}
+
+void TaskQueueImpl::WakeUpForDelayedWork(LazyNow* lazy_now) {
+ // Enqueue all delayed tasks that should be running now, skipping any that
+ // have been canceled.
+ while (!main_thread_only().delayed_incoming_queue.empty()) {
+ Task& task =
+ const_cast<Task&>(main_thread_only().delayed_incoming_queue.top());
+ if (!task.task || task.task.IsCancelled()) {
+ main_thread_only().delayed_incoming_queue.pop();
+ continue;
+ }
+ if (task.delayed_run_time > lazy_now->Now())
+ break;
+ ActivateDelayedFenceIfNeeded(task.delayed_run_time);
+ task.set_enqueue_order(
+ main_thread_only().sequence_manager->GetNextSequenceNumber());
+ main_thread_only().delayed_work_queue->Push(std::move(task));
+ main_thread_only().delayed_incoming_queue.pop();
+
+ // Normally WakeUpForDelayedWork is called inside DoWork, but it also
+ // can be called elsewhere (e.g. tests and fast-path for posting
+ // delayed tasks). Ensure that there is a DoWork posting. No-op inside
+ // existing DoWork due to DoWork deduplication.
+ if (IsQueueEnabled() || !main_thread_only().current_fence) {
+ main_thread_only().sequence_manager->MaybeScheduleImmediateWork(
+ FROM_HERE);
+ }
+ }
+
+ UpdateDelayedWakeUp(lazy_now);
+}
+
+void TaskQueueImpl::TraceQueueSize() const {
+ bool is_tracing;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager"), &is_tracing);
+ if (!is_tracing)
+ return;
+
+ // It's only safe to access the work queues from the main thread.
+ // TODO(alexclarke): We should find another way of tracing this
+ if (PlatformThread::CurrentId() != thread_id_)
+ return;
+
+ AutoLock lock(immediate_incoming_queue_lock_);
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"), GetName(),
+ immediate_incoming_queue().size() +
+ main_thread_only().immediate_work_queue->Size() +
+ main_thread_only().delayed_work_queue->Size() +
+ main_thread_only().delayed_incoming_queue.size());
+}
+
+void TaskQueueImpl::SetQueuePriority(TaskQueue::QueuePriority priority) {
+ if (!main_thread_only().sequence_manager || priority == GetQueuePriority())
+ return;
+ main_thread_only()
+ .sequence_manager->main_thread_only()
+ .selector.SetQueuePriority(this, priority);
+}
+
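+// Note (based on the code below): the priority is not stored directly on the
+// queue; it is derived from the index of the WorkQueueSet this queue's work
+// queues currently belong to, which the selector is assumed to keep equal to
+// the QueuePriority value (hence the static_cast).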
+TaskQueue::QueuePriority TaskQueueImpl::GetQueuePriority() const {
+ size_t set_index = immediate_work_queue()->work_queue_set_index();
+ DCHECK_EQ(set_index, delayed_work_queue()->work_queue_set_index());
+ return static_cast<TaskQueue::QueuePriority>(set_index);
+}
+
+void TaskQueueImpl::AsValueInto(TimeTicks now,
+ trace_event::TracedValue* state) const {
+ AutoLock lock(any_thread_lock_);
+ AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+ state->BeginDictionary();
+ state->SetString("name", GetName());
+ if (!main_thread_only().sequence_manager) {
+ state->SetBoolean("unregistered", true);
+ state->EndDictionary();
+ return;
+ }
+ DCHECK(main_thread_only().time_domain);
+ DCHECK(main_thread_only().delayed_work_queue);
+ DCHECK(main_thread_only().immediate_work_queue);
+
+ state->SetString(
+ "task_queue_id",
+ StringPrintf("0x%" PRIx64,
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this))));
+ state->SetBoolean("enabled", IsQueueEnabled());
+ state->SetString("time_domain_name",
+ main_thread_only().time_domain->GetName());
+ state->SetInteger("immediate_incoming_queue_size",
+ immediate_incoming_queue().size());
+ state->SetInteger("delayed_incoming_queue_size",
+ main_thread_only().delayed_incoming_queue.size());
+ state->SetInteger("immediate_work_queue_size",
+ main_thread_only().immediate_work_queue->Size());
+ state->SetInteger("delayed_work_queue_size",
+ main_thread_only().delayed_work_queue->Size());
+
+ if (!main_thread_only().delayed_incoming_queue.empty()) {
+ TimeDelta delay_to_next_task =
+ (main_thread_only().delayed_incoming_queue.top().delayed_run_time -
+ main_thread_only().time_domain->CreateLazyNow().Now());
+ state->SetDouble("delay_to_next_task_ms",
+ delay_to_next_task.InMillisecondsF());
+ }
+ if (main_thread_only().current_fence)
+ state->SetInteger("current_fence", main_thread_only().current_fence);
+ if (main_thread_only().delayed_fence) {
+ state->SetDouble(
+ "delayed_fence_seconds_from_now",
+ (main_thread_only().delayed_fence.value() - now).InSecondsF());
+ }
+
+ bool verbose = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots"),
+ &verbose);
+
+ if (verbose) {
+ state->BeginArray("immediate_incoming_queue");
+ QueueAsValueInto(immediate_incoming_queue(), now, state);
+ state->EndArray();
+ state->BeginArray("delayed_work_queue");
+ main_thread_only().delayed_work_queue->AsValueInto(now, state);
+ state->EndArray();
+ state->BeginArray("immediate_work_queue");
+ main_thread_only().immediate_work_queue->AsValueInto(now, state);
+ state->EndArray();
+ state->BeginArray("delayed_incoming_queue");
+ QueueAsValueInto(main_thread_only().delayed_incoming_queue, now, state);
+ state->EndArray();
+ }
+ state->SetString("priority", TaskQueue::PriorityToString(GetQueuePriority()));
+ state->EndDictionary();
+}
+
+void TaskQueueImpl::AddTaskObserver(MessageLoop::TaskObserver* task_observer) {
+ main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+void TaskQueueImpl::RemoveTaskObserver(
+ MessageLoop::TaskObserver* task_observer) {
+ main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+void TaskQueueImpl::NotifyWillProcessTask(const PendingTask& pending_task) {
+ DCHECK(should_notify_observers_);
+ if (main_thread_only().blame_context)
+ main_thread_only().blame_context->Enter();
+ for (auto& observer : main_thread_only().task_observers)
+ observer.WillProcessTask(pending_task);
+}
+
+void TaskQueueImpl::NotifyDidProcessTask(const PendingTask& pending_task) {
+ DCHECK(should_notify_observers_);
+ for (auto& observer : main_thread_only().task_observers)
+ observer.DidProcessTask(pending_task);
+ if (main_thread_only().blame_context)
+ main_thread_only().blame_context->Leave();
+}
+
+void TaskQueueImpl::SetTimeDomain(TimeDomain* time_domain) {
+ {
+ AutoLock lock(any_thread_lock_);
+ DCHECK(time_domain);
+ // NOTE this is similar to checking |any_thread().sequence_manager| but
+ // the TaskQueueSelectorTests constructs TaskQueueImpl directly with a null
+ // sequence_manager. Instead we check |any_thread().time_domain| which is
+ // another way of asserting that UnregisterTaskQueue has not been called.
+ DCHECK(any_thread().time_domain);
+ if (!any_thread().time_domain)
+ return;
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (time_domain == main_thread_only().time_domain)
+ return;
+
+ any_thread().time_domain = time_domain;
+ }
+
+ main_thread_only().time_domain->UnregisterQueue(this);
+ main_thread_only().time_domain = time_domain;
+
+ LazyNow lazy_now = time_domain->CreateLazyNow();
+ // Clear scheduled wake up to ensure that new notifications are issued
+ // correctly.
+  // TODO(altimin): Remove this when we no longer have to support changing
+  // time domains.
+ main_thread_only().scheduled_wake_up = nullopt;
+ UpdateDelayedWakeUp(&lazy_now);
+}
+
+TimeDomain* TaskQueueImpl::GetTimeDomain() const {
+ if (PlatformThread::CurrentId() == thread_id_)
+ return main_thread_only().time_domain;
+
+ AutoLock lock(any_thread_lock_);
+ return any_thread().time_domain;
+}
+
+void TaskQueueImpl::SetBlameContext(trace_event::BlameContext* blame_context) {
+ main_thread_only().blame_context = blame_context;
+}
+
+void TaskQueueImpl::InsertFence(TaskQueue::InsertFencePosition position) {
+ if (!main_thread_only().sequence_manager)
+ return;
+
+ // Only one fence may be present at a time.
+ main_thread_only().delayed_fence = nullopt;
+
+ EnqueueOrder previous_fence = main_thread_only().current_fence;
+ EnqueueOrder current_fence =
+ position == TaskQueue::InsertFencePosition::kNow
+ ? main_thread_only().sequence_manager->GetNextSequenceNumber()
+ : EnqueueOrder::blocking_fence();
+
+ // Tasks posted after this point will have a strictly higher enqueue order
+ // and will be blocked from running.
+ main_thread_only().current_fence = current_fence;
+ bool task_unblocked =
+ main_thread_only().immediate_work_queue->InsertFence(current_fence);
+ task_unblocked |=
+ main_thread_only().delayed_work_queue->InsertFence(current_fence);
+
+ if (!task_unblocked && previous_fence && previous_fence < current_fence) {
+ AutoLock lock(immediate_incoming_queue_lock_);
+ if (!immediate_incoming_queue().empty() &&
+ immediate_incoming_queue().front().enqueue_order() > previous_fence &&
+ immediate_incoming_queue().front().enqueue_order() < current_fence) {
+ task_unblocked = true;
+ }
+ }
+
+ if (IsQueueEnabled() && task_unblocked) {
+ main_thread_only().sequence_manager->MaybeScheduleImmediateWork(FROM_HERE);
+ }
+}
+
+void TaskQueueImpl::InsertFenceAt(TimeTicks time) {
+ // Task queue can have only one fence, delayed or not.
+ RemoveFence();
+ main_thread_only().delayed_fence = time;
+}
+
+void TaskQueueImpl::RemoveFence() {
+ if (!main_thread_only().sequence_manager)
+ return;
+
+ EnqueueOrder previous_fence = main_thread_only().current_fence;
+ main_thread_only().current_fence = EnqueueOrder::none();
+ main_thread_only().delayed_fence = nullopt;
+
+ bool task_unblocked = main_thread_only().immediate_work_queue->RemoveFence();
+ task_unblocked |= main_thread_only().delayed_work_queue->RemoveFence();
+
+ if (!task_unblocked && previous_fence) {
+ AutoLock lock(immediate_incoming_queue_lock_);
+ if (!immediate_incoming_queue().empty() &&
+ immediate_incoming_queue().front().enqueue_order() > previous_fence) {
+ task_unblocked = true;
+ }
+ }
+
+ if (IsQueueEnabled() && task_unblocked) {
+ main_thread_only().sequence_manager->MaybeScheduleImmediateWork(FROM_HERE);
+ }
+}
+
+bool TaskQueueImpl::BlockedByFence() const {
+ if (!main_thread_only().current_fence)
+ return false;
+
+ if (!main_thread_only().immediate_work_queue->BlockedByFence() ||
+ !main_thread_only().delayed_work_queue->BlockedByFence()) {
+ return false;
+ }
+
+ AutoLock lock(immediate_incoming_queue_lock_);
+ if (immediate_incoming_queue().empty())
+ return true;
+
+ return immediate_incoming_queue().front().enqueue_order() >
+ main_thread_only().current_fence;
+}
+
+bool TaskQueueImpl::HasActiveFence() {
+ if (main_thread_only().delayed_fence &&
+ main_thread_only().time_domain->Now() >
+ main_thread_only().delayed_fence.value()) {
+ return true;
+ }
+ return !!main_thread_only().current_fence;
+}
+
+bool TaskQueueImpl::CouldTaskRun(EnqueueOrder enqueue_order) const {
+ if (!IsQueueEnabled())
+ return false;
+
+ if (!main_thread_only().current_fence)
+ return true;
+
+ return enqueue_order < main_thread_only().current_fence;
+}
+
+// static
+void TaskQueueImpl::QueueAsValueInto(const TaskDeque& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state) {
+ for (const Task& task : queue) {
+ TaskAsValueInto(task, now, state);
+ }
+}
+
+// static
+void TaskQueueImpl::QueueAsValueInto(const std::priority_queue<Task>& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state) {
+  // Remove const to iterate over |queue| in a destructive manner. The
+  // content is restored from |visited| afterwards.
+ std::priority_queue<Task>* mutable_queue =
+ const_cast<std::priority_queue<Task>*>(&queue);
+ std::priority_queue<Task> visited;
+ while (!mutable_queue->empty()) {
+ TaskAsValueInto(mutable_queue->top(), now, state);
+ visited.push(std::move(const_cast<Task&>(mutable_queue->top())));
+ mutable_queue->pop();
+ }
+ *mutable_queue = std::move(visited);
+}
+
+// static
+void TaskQueueImpl::TaskAsValueInto(const Task& task,
+ TimeTicks now,
+ trace_event::TracedValue* state) {
+ state->BeginDictionary();
+ state->SetString("posted_from", task.posted_from.ToString());
+ if (task.enqueue_order_set())
+ state->SetInteger("enqueue_order", task.enqueue_order());
+ state->SetInteger("sequence_num", task.sequence_num);
+ state->SetBoolean("nestable", task.nestable == Nestable::kNestable);
+ state->SetBoolean("is_high_res", task.is_high_res);
+ state->SetBoolean("is_cancelled", task.task.IsCancelled());
+ state->SetDouble("delayed_run_time",
+ (task.delayed_run_time - TimeTicks()).InMillisecondsF());
+ state->SetDouble("delayed_run_time_milliseconds_from_now",
+ (task.delayed_run_time - now).InMillisecondsF());
+ state->EndDictionary();
+}
+
+TaskQueueImpl::QueueEnabledVoterImpl::QueueEnabledVoterImpl(
+ scoped_refptr<TaskQueue> task_queue)
+ : task_queue_(task_queue), enabled_(true) {}
+
+TaskQueueImpl::QueueEnabledVoterImpl::~QueueEnabledVoterImpl() {
+ if (task_queue_->GetTaskQueueImpl())
+ task_queue_->GetTaskQueueImpl()->RemoveQueueEnabledVoter(this);
+}
+
+void TaskQueueImpl::QueueEnabledVoterImpl::SetQueueEnabled(bool enabled) {
+ if (enabled_ == enabled)
+ return;
+
+ task_queue_->GetTaskQueueImpl()->OnQueueEnabledVoteChanged(enabled);
+ enabled_ = enabled;
+}
+
+void TaskQueueImpl::RemoveQueueEnabledVoter(
+ const QueueEnabledVoterImpl* voter) {
+ // Bail out if we're being called from TaskQueueImpl::UnregisterTaskQueue.
+ if (!main_thread_only().time_domain)
+ return;
+
+ bool was_enabled = IsQueueEnabled();
+ if (voter->enabled_) {
+ main_thread_only().is_enabled_refcount--;
+ DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+ }
+
+ main_thread_only().voter_refcount--;
+ DCHECK_GE(main_thread_only().voter_refcount, 0);
+
+ bool is_enabled = IsQueueEnabled();
+ if (was_enabled != is_enabled)
+ EnableOrDisableWithSelector(is_enabled);
+}
+
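+// Illustrative example for IsQueueEnabled() below (assumed values): with two
+// voters where one has voted to disable the queue, voter_refcount == 2 and
+// is_enabled_refcount == 1, so the counts differ and the queue is disabled.
+// Once the dissenting voter re-enables the queue (or is destroyed), the
+// counts match again and, absent SetQueueEnabledForTest(false), the queue is
+// enabled.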
+bool TaskQueueImpl::IsQueueEnabled() const {
+ // By default is_enabled_refcount and voter_refcount both equal zero.
+ return (main_thread_only().is_enabled_refcount ==
+ main_thread_only().voter_refcount) &&
+ main_thread_only().is_enabled_for_test;
+}
+
+void TaskQueueImpl::OnQueueEnabledVoteChanged(bool enabled) {
+ bool was_enabled = IsQueueEnabled();
+ if (enabled) {
+ main_thread_only().is_enabled_refcount++;
+ DCHECK_LE(main_thread_only().is_enabled_refcount,
+ main_thread_only().voter_refcount);
+ } else {
+ main_thread_only().is_enabled_refcount--;
+ DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+ }
+
+ bool is_enabled = IsQueueEnabled();
+ if (was_enabled != is_enabled)
+ EnableOrDisableWithSelector(is_enabled);
+}
+
+void TaskQueueImpl::EnableOrDisableWithSelector(bool enable) {
+ if (!main_thread_only().sequence_manager)
+ return;
+
+ LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+ UpdateDelayedWakeUp(&lazy_now);
+
+ if (enable) {
+ if (HasPendingImmediateWork() &&
+ !main_thread_only().on_next_wake_up_changed_callback.is_null()) {
+ // Delayed work notification will be issued via time domain.
+ main_thread_only().on_next_wake_up_changed_callback.Run(TimeTicks());
+ }
+
+ // Note the selector calls SequenceManager::OnTaskQueueEnabled which posts
+ // a DoWork if needed.
+ main_thread_only()
+ .sequence_manager->main_thread_only()
+ .selector.EnableQueue(this);
+ } else {
+ main_thread_only()
+ .sequence_manager->main_thread_only()
+ .selector.DisableQueue(this);
+ }
+}
+
+std::unique_ptr<TaskQueue::QueueEnabledVoter>
+TaskQueueImpl::CreateQueueEnabledVoter(scoped_refptr<TaskQueue> task_queue) {
+ DCHECK_EQ(task_queue->GetTaskQueueImpl(), this);
+ main_thread_only().voter_refcount++;
+ main_thread_only().is_enabled_refcount++;
+ return std::make_unique<QueueEnabledVoterImpl>(task_queue);
+}
+
+void TaskQueueImpl::SweepCanceledDelayedTasks(TimeTicks now) {
+ if (main_thread_only().delayed_incoming_queue.empty())
+ return;
+
+ // Remove canceled tasks.
+ std::priority_queue<Task> remaining_tasks;
+ while (!main_thread_only().delayed_incoming_queue.empty()) {
+ if (!main_thread_only().delayed_incoming_queue.top().task.IsCancelled()) {
+ remaining_tasks.push(std::move(
+ const_cast<Task&>(main_thread_only().delayed_incoming_queue.top())));
+ }
+ main_thread_only().delayed_incoming_queue.pop();
+ }
+
+ main_thread_only().delayed_incoming_queue = std::move(remaining_tasks);
+
+ LazyNow lazy_now(now);
+ UpdateDelayedWakeUp(&lazy_now);
+}
+
+void TaskQueueImpl::PushImmediateIncomingTaskForTest(
+ TaskQueueImpl::Task&& task) {
+ AutoLock lock(immediate_incoming_queue_lock_);
+ immediate_incoming_queue().push_back(std::move(task));
+}
+
+void TaskQueueImpl::RequeueDeferredNonNestableTask(
+ DeferredNonNestableTask task) {
+ DCHECK(task.task.nestable == Nestable::kNonNestable);
+  // The re-queued tasks have to be pushed onto the front because we'd otherwise
+  // violate the strictly monotonically increasing enqueue order within the
+  // WorkQueue. We can't assign them a new enqueue order here because that
+  // would not behave correctly with fences and things would break (e.g. the
+  // Idle TQ).
+ if (task.work_queue_type == WorkQueueType::kDelayed) {
+ main_thread_only().delayed_work_queue->PushNonNestableTaskToFront(
+ std::move(task.task));
+ } else {
+ main_thread_only().immediate_work_queue->PushNonNestableTaskToFront(
+ std::move(task.task));
+ }
+}
+
+void TaskQueueImpl::SetOnNextWakeUpChangedCallback(
+ TaskQueueImpl::OnNextWakeUpChangedCallback callback) {
+#if DCHECK_IS_ON()
+ if (callback) {
+ DCHECK(main_thread_only().on_next_wake_up_changed_callback.is_null())
+ << "Can't assign two different observers to "
+ "blink::scheduler::TaskQueue";
+ }
+#endif
+ AutoLock lock(any_thread_lock_);
+ any_thread().on_next_wake_up_changed_callback = callback;
+ main_thread_only().on_next_wake_up_changed_callback = callback;
+}
+
+void TaskQueueImpl::UpdateDelayedWakeUp(LazyNow* lazy_now) {
+ return UpdateDelayedWakeUpImpl(lazy_now, GetNextScheduledWakeUpImpl());
+}
+
+void TaskQueueImpl::UpdateDelayedWakeUpImpl(
+ LazyNow* lazy_now,
+ Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+ if (main_thread_only().scheduled_wake_up == wake_up)
+ return;
+ main_thread_only().scheduled_wake_up = wake_up;
+
+ if (wake_up &&
+ !main_thread_only().on_next_wake_up_changed_callback.is_null() &&
+ !HasPendingImmediateWork()) {
+ main_thread_only().on_next_wake_up_changed_callback.Run(wake_up->time);
+ }
+
+ main_thread_only().time_domain->SetNextWakeUpForQueue(this, wake_up,
+ lazy_now);
+}
+
+void TaskQueueImpl::SetDelayedWakeUpForTesting(
+ Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+ LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+ UpdateDelayedWakeUpImpl(&lazy_now, wake_up);
+}
+
+bool TaskQueueImpl::HasPendingImmediateWork() {
+ // Any work queue tasks count as immediate work.
+ if (!main_thread_only().delayed_work_queue->Empty() ||
+ !main_thread_only().immediate_work_queue->Empty()) {
+ return true;
+ }
+
+ // Finally tasks on |immediate_incoming_queue| count as immediate work.
+ AutoLock lock(immediate_incoming_queue_lock_);
+ return !immediate_incoming_queue().empty();
+}
+
+void TaskQueueImpl::SetOnTaskStartedHandler(
+ TaskQueueImpl::OnTaskStartedHandler handler) {
+ main_thread_only().on_task_started_handler = std::move(handler);
+}
+
+void TaskQueueImpl::OnTaskStarted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing) {
+ if (!main_thread_only().on_task_started_handler.is_null())
+ main_thread_only().on_task_started_handler.Run(task, task_timing);
+}
+
+void TaskQueueImpl::SetOnTaskCompletedHandler(
+ TaskQueueImpl::OnTaskCompletedHandler handler) {
+ main_thread_only().on_task_completed_handler = std::move(handler);
+}
+
+void TaskQueueImpl::OnTaskCompleted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing) {
+ if (!main_thread_only().on_task_completed_handler.is_null())
+ main_thread_only().on_task_completed_handler.Run(task, task_timing);
+}
+
+bool TaskQueueImpl::RequiresTaskTiming() const {
+ return !main_thread_only().on_task_started_handler.is_null() ||
+ !main_thread_only().on_task_completed_handler.is_null();
+}
+
+bool TaskQueueImpl::IsUnregistered() const {
+ AutoLock lock(any_thread_lock_);
+ return !any_thread().sequence_manager;
+}
+
+WeakPtr<SequenceManagerImpl> TaskQueueImpl::GetSequenceManagerWeakPtr() {
+ return main_thread_only().sequence_manager->GetWeakPtr();
+}
+
+scoped_refptr<GracefulQueueShutdownHelper>
+TaskQueueImpl::GetGracefulQueueShutdownHelper() {
+ return main_thread_only().sequence_manager->GetGracefulQueueShutdownHelper();
+}
+
+void TaskQueueImpl::SetQueueEnabledForTest(bool enabled) {
+ main_thread_only().is_enabled_for_test = enabled;
+ EnableOrDisableWithSelector(IsQueueEnabled());
+}
+
+void TaskQueueImpl::ActivateDelayedFenceIfNeeded(TimeTicks now) {
+ if (!main_thread_only().delayed_fence)
+ return;
+ if (main_thread_only().delayed_fence.value() > now)
+ return;
+ InsertFence(TaskQueue::InsertFencePosition::kNow);
+ main_thread_only().delayed_fence = nullopt;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.h b/chromium/base/task/sequence_manager/task_queue_impl.h
new file mode 100644
index 00000000000..b64dd9fd46d
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_impl.h
@@ -0,0 +1,471 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/callback.h"
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/lazily_deallocated_deque.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/task_queue.h"
+#include "base/threading/thread_checker.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+
+class LazyNow;
+class TimeDomain;
+
+namespace internal {
+
+class SequenceManagerImpl;
+class WorkQueue;
+class WorkQueueSets;
+
+struct IncomingImmediateWorkList {
+ IncomingImmediateWorkList* next = nullptr;
+ TaskQueueImpl* queue = nullptr;
+ internal::EnqueueOrder order;
+};
+
+// TaskQueueImpl has four main queues:
+//
+// Immediate (non-delayed) tasks:
+// |immediate_incoming_queue| - PostTask enqueues tasks here.
+// |immediate_work_queue| - SequenceManager takes immediate tasks here.
+//
+// Delayed tasks
+// |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
+// |delayed_work_queue| - SequenceManager takes delayed tasks here.
+//
+// The |immediate_incoming_queue| can be accessed from any thread; the other
+// queues are main-thread only. To reduce the overhead of locking,
+// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
+// |immediate_work_queue| becomes empty.
+//
+// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
+// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
+// calls WakeUpForDelayedWork and ready delayed tasks are moved into the
+// |delayed_work_queue|. Note the EnqueueOrder (used for ordering) for a delayed
+// task is not set until it's moved into the |delayed_work_queue|.
+//
+// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
+// prioritization. Task selection is done by the TaskQueueSelector and when a
+// queue is selected, it round-robins between the |immediate_work_queue| and
+// |delayed_work_queue|. The reason for this is we want to make sure delayed
+// tasks (normally the most common type) don't starve out immediate work.
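+//
+// A rough sketch of the flow described above (illustrative only):
+//
+//   PostTask        -> |immediate_incoming_queue| -> |immediate_work_queue|
+//                      (swapped in when the work queue becomes empty)
+//   PostDelayedTask -> |delayed_incoming_queue|   -> |delayed_work_queue|
+//                      (moved across once the delay expires, via TimeDomain)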
+class BASE_EXPORT TaskQueueImpl {
+ public:
+ TaskQueueImpl(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec);
+
+ ~TaskQueueImpl();
+
+ // Represents a time at which a task wants to run. Tasks scheduled for the
+ // same point in time will be ordered by their sequence numbers.
+ struct DelayedWakeUp {
+ TimeTicks time;
+ int sequence_num;
+
+ bool operator!=(const DelayedWakeUp& other) const {
+ return time != other.time || other.sequence_num != sequence_num;
+ }
+
+ bool operator==(const DelayedWakeUp& other) const {
+ return !(*this != other);
+ }
+
+ bool operator<=(const DelayedWakeUp& other) const {
+ if (time == other.time) {
+ // Debug gcc builds can compare an element against itself.
+ DCHECK(sequence_num != other.sequence_num || this == &other);
+        // |PendingTask::sequence_num| is an int and might wrap around to
+        // a negative number when cast from EnqueueOrder.
+        // This way of comparing handles that correctly.
+ return (sequence_num - other.sequence_num) <= 0;
+ }
+ return time < other.time;
+ }
+ };
+
+ class BASE_EXPORT Task : public TaskQueue::Task {
+ public:
+ Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number);
+
+ Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number,
+ EnqueueOrder enqueue_order);
+
+ DelayedWakeUp delayed_wake_up() const {
+      // Since we use |sequence_num| in DelayedWakeUp for ordering purposes,
+      // and integer overflow handling is type-sensitive, it's worth protecting
+      // it against an unnoticed change of type in the PendingTask base class.
+ static_assert(std::is_same<decltype(sequence_num), int>::value, "");
+ return DelayedWakeUp{delayed_run_time, sequence_num};
+ }
+
+ EnqueueOrder enqueue_order() const {
+ DCHECK(enqueue_order_);
+ return enqueue_order_;
+ }
+
+ void set_enqueue_order(EnqueueOrder enqueue_order) {
+ DCHECK(!enqueue_order_);
+ enqueue_order_ = enqueue_order;
+ }
+
+ bool enqueue_order_set() const { return enqueue_order_; }
+
+ private:
+ // Similar to sequence number, but ultimately the |enqueue_order_| is what
+ // the scheduler uses for task ordering. For immediate tasks |enqueue_order|
+ // is set when posted, but for delayed tasks it's not defined until they are
+ // enqueued on the |delayed_work_queue_|. This is because otherwise delayed
+ // tasks could run before an immediate task posted after the delayed task.
+ EnqueueOrder enqueue_order_;
+ };
+
+  // A result returned by PostDelayedTask. If the scheduler fails to post the
+  // task because it is shutting down, the task is handed back so it can be
+  // destroyed outside the lock.
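+  //
+  // Illustrative use from a posting path (sketch; PostedTask construction is
+  // elided and |queue| is an assumed TaskQueueImpl*):
+  //   TaskQueueImpl::PostTaskResult result =
+  //       queue->PostDelayedTask(std::move(posted_task));
+  //   if (!result.success) {
+  //     // |result.task| still owns the un-posted closure and is destroyed
+  //     // here, outside of the queue's internal locks.
+  //   }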
+ struct PostTaskResult {
+ PostTaskResult();
+ PostTaskResult(bool success, TaskQueue::PostedTask task);
+ PostTaskResult(PostTaskResult&& move_from);
+ PostTaskResult(const PostTaskResult& copy_from) = delete;
+ ~PostTaskResult();
+
+ static PostTaskResult Success();
+ static PostTaskResult Fail(TaskQueue::PostedTask task);
+
+ bool success;
+ TaskQueue::PostedTask task;
+ };
+
+ // Types of queues TaskQueueImpl is maintaining internally.
+ enum class WorkQueueType { kImmediate, kDelayed };
+
+  // Non-nestable tasks may get deferred, but the deferral queue is maintained
+  // on the SequenceManager side, so we need to keep the information needed to
+  // requeue them.
+ struct DeferredNonNestableTask {
+ internal::TaskQueueImpl::Task task;
+ internal::TaskQueueImpl* task_queue;
+ WorkQueueType work_queue_type;
+ };
+
+ using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
+ using OnTaskStartedHandler =
+ RepeatingCallback<void(const TaskQueue::Task&,
+ const TaskQueue::TaskTiming&)>;
+ using OnTaskCompletedHandler =
+ RepeatingCallback<void(const TaskQueue::Task&,
+ const TaskQueue::TaskTiming&)>;
+
+ // TaskQueue implementation.
+ const char* GetName() const;
+ bool RunsTasksInCurrentSequence() const;
+ PostTaskResult PostDelayedTask(TaskQueue::PostedTask task);
+ // Require a reference to enclosing task queue for lifetime control.
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> CreateQueueEnabledVoter(
+ scoped_refptr<TaskQueue> owning_task_queue);
+ bool IsQueueEnabled() const;
+ bool IsEmpty() const;
+ size_t GetNumberOfPendingTasks() const;
+ bool HasTaskToRunImmediately() const;
+ Optional<TimeTicks> GetNextScheduledWakeUp();
+ Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
+ void SetQueuePriority(TaskQueue::QueuePriority priority);
+ TaskQueue::QueuePriority GetQueuePriority() const;
+ void AddTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void SetTimeDomain(TimeDomain* time_domain);
+ TimeDomain* GetTimeDomain() const;
+ void SetBlameContext(trace_event::BlameContext* blame_context);
+ void InsertFence(TaskQueue::InsertFencePosition position);
+ void InsertFenceAt(TimeTicks time);
+ void RemoveFence();
+ bool HasActiveFence();
+ bool BlockedByFence() const;
+ // Implementation of TaskQueue::SetObserver.
+ void SetOnNextWakeUpChangedCallback(OnNextWakeUpChangedCallback callback);
+
+ void UnregisterTaskQueue();
+
+ // Returns true if a (potentially hypothetical) task with the specified
+ // |enqueue_order| could run on the queue. Must be called from the main
+ // thread.
+ bool CouldTaskRun(EnqueueOrder enqueue_order) const;
+
+ // Must only be called from the thread this task queue was created on.
+ void ReloadImmediateWorkQueueIfEmpty();
+
+ void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+
+ bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
+ bool GetShouldNotifyObservers() const { return should_notify_observers_; }
+
+ void NotifyWillProcessTask(const PendingTask& pending_task);
+ void NotifyDidProcessTask(const PendingTask& pending_task);
+
+ // Check for available tasks in immediate work queues.
+ // Used to check if we need to generate notifications about delayed work.
+ bool HasPendingImmediateWork();
+
+ WorkQueue* delayed_work_queue() {
+ return main_thread_only().delayed_work_queue.get();
+ }
+
+ const WorkQueue* delayed_work_queue() const {
+ return main_thread_only().delayed_work_queue.get();
+ }
+
+ WorkQueue* immediate_work_queue() {
+ return main_thread_only().immediate_work_queue.get();
+ }
+
+ const WorkQueue* immediate_work_queue() const {
+ return main_thread_only().immediate_work_queue.get();
+ }
+
+ // Protected by SequenceManagerImpl's AnyThread lock.
+ IncomingImmediateWorkList* immediate_work_list_storage() {
+ return &immediate_work_list_storage_;
+ }
+
+ // Enqueues any delayed tasks which should be run now on the
+ // |delayed_work_queue|.
+ // Must be called from the main thread.
+ void WakeUpForDelayedWork(LazyNow* lazy_now);
+
+ HeapHandle heap_handle() const { return main_thread_only().heap_handle; }
+
+ void set_heap_handle(HeapHandle heap_handle) {
+ main_thread_only().heap_handle = heap_handle;
+ }
+
+ // Pushes |task| onto the front of the specified work queue. Caution must be
+ // taken with this API because you could easily starve out other work.
+ // TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
+ void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);
+
+ void PushImmediateIncomingTaskForTest(TaskQueueImpl::Task&& task);
+
+ class QueueEnabledVoterImpl : public TaskQueue::QueueEnabledVoter {
+ public:
+ explicit QueueEnabledVoterImpl(scoped_refptr<TaskQueue> task_queue);
+ ~QueueEnabledVoterImpl() override;
+
+ // QueueEnabledVoter implementation.
+ void SetQueueEnabled(bool enabled) override;
+
+ TaskQueueImpl* GetTaskQueueForTest() const {
+ return task_queue_->GetTaskQueueImpl();
+ }
+
+ private:
+ friend class TaskQueueImpl;
+
+ scoped_refptr<TaskQueue> task_queue_;
+ bool enabled_;
+ };
+
+ // Iterates over |delayed_incoming_queue| removing canceled tasks.
+ void SweepCanceledDelayedTasks(TimeTicks now);
+
+  // Allows the wrapping TaskQueue to set a handler to subscribe to
+  // notifications about started and completed tasks.
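+  //
+  // Illustrative usage (hypothetical caller):
+  //   impl->SetOnTaskStartedHandler(BindRepeating(
+  //       [](const TaskQueue::Task& task,
+  //          const TaskQueue::TaskTiming& timing) {
+  //         // e.g. record the moment the task started running.
+  //       }));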
+ void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
+ void OnTaskStarted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing);
+ void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
+ void OnTaskCompleted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing);
+ bool RequiresTaskTiming() const;
+
+ WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();
+
+ scoped_refptr<GracefulQueueShutdownHelper> GetGracefulQueueShutdownHelper();
+
+  // Returns true if this queue is unregistered or the task queue manager has
+  // been deleted, in which case this queue can be safely deleted on any
+  // thread.
+ bool IsUnregistered() const;
+
+  // Disables the queue for testing purposes, for cases when a
+  // QueueEnabledVoter can't be constructed because there is no TaskQueue.
+ void SetQueueEnabledForTest(bool enabled);
+
+ protected:
+ void SetDelayedWakeUpForTesting(Optional<DelayedWakeUp> wake_up);
+
+ private:
+ friend class WorkQueue;
+ friend class WorkQueueTest;
+
+ struct AnyThread {
+ AnyThread(SequenceManagerImpl* sequence_manager, TimeDomain* time_domain);
+ ~AnyThread();
+
+    // SequenceManagerImpl, TimeDomain and Observer are maintained in two
+    // copies: inside AnyThread and inside MainThreadOnly. They can be changed
+    // only from the main thread, so the lock must be held when accessing them
+    // from other threads.
+ SequenceManagerImpl* sequence_manager;
+ TimeDomain* time_domain;
+ // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+ OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+ };
+
+ struct MainThreadOnly {
+ MainThreadOnly(SequenceManagerImpl* sequence_manager,
+ TaskQueueImpl* task_queue,
+ TimeDomain* time_domain);
+ ~MainThreadOnly();
+
+ // Another copy of SequenceManagerImpl, TimeDomain and Observer
+ // for lock-free access from the main thread.
+ // See description inside struct AnyThread for details.
+ SequenceManagerImpl* sequence_manager;
+ TimeDomain* time_domain;
+ // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+ OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+
+ std::unique_ptr<WorkQueue> delayed_work_queue;
+ std::unique_ptr<WorkQueue> immediate_work_queue;
+ std::priority_queue<TaskQueueImpl::Task> delayed_incoming_queue;
+ ObserverList<MessageLoop::TaskObserver> task_observers;
+ size_t set_index;
+ HeapHandle heap_handle;
+ int is_enabled_refcount;
+ int voter_refcount;
+ trace_event::BlameContext* blame_context; // Not owned.
+ EnqueueOrder current_fence;
+ Optional<TimeTicks> delayed_fence;
+ OnTaskStartedHandler on_task_started_handler;
+ OnTaskCompletedHandler on_task_completed_handler;
+    // Last reported wake-up, used only in UpdateDelayedWakeUpImpl to avoid
+    // excessive calls.
+ Optional<DelayedWakeUp> scheduled_wake_up;
+ // If false, queue will be disabled. Used only for tests.
+ bool is_enabled_for_test;
+ };
+
+ PostTaskResult PostImmediateTaskImpl(TaskQueue::PostedTask task);
+ PostTaskResult PostDelayedTaskImpl(TaskQueue::PostedTask task);
+
+ // Push the task onto the |delayed_incoming_queue|. Lock-free main thread
+ // only fast path.
+ void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
+ TimeTicks now);
+
+ // Push the task onto the |delayed_incoming_queue|. Slow path from other
+ // threads.
+ void PushOntoDelayedIncomingQueueLocked(Task pending_task);
+
+ void ScheduleDelayedWorkTask(Task pending_task);
+
+ void MoveReadyImmediateTasksToImmediateWorkQueueLocked();
+
+  // Pushes the task onto the |immediate_incoming_queue|; for auto-pumped
+  // queues, calls MaybePostDoWorkOnMainRunner if the incoming queue was
+  // empty.
+ void PushOntoImmediateIncomingQueueLocked(Task task);
+
+ using TaskDeque = circular_deque<Task>;
+
+ // Extracts all the tasks from the immediate incoming queue and swaps it with
+ // |queue| which must be empty.
+ // Can be called from any thread.
+ void ReloadEmptyImmediateQueue(TaskDeque* queue);
+
+ void TraceQueueSize() const;
+ static void QueueAsValueInto(const TaskDeque& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+ static void QueueAsValueInto(const std::priority_queue<Task>& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+ static void TaskAsValueInto(const Task& task,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+
+ void RemoveQueueEnabledVoter(const QueueEnabledVoterImpl* voter);
+ void OnQueueEnabledVoteChanged(bool enabled);
+ void EnableOrDisableWithSelector(bool enable);
+
+ // Schedules delayed work on time domain and calls the observer.
+ void UpdateDelayedWakeUp(LazyNow* lazy_now);
+ void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
+ Optional<DelayedWakeUp> wake_up);
+
+  // Activates the delayed fence if its time has come.
+ void ActivateDelayedFenceIfNeeded(TimeTicks now);
+
+ const char* name_;
+
+ const PlatformThreadId thread_id_;
+
+ mutable Lock any_thread_lock_;
+ AnyThread any_thread_;
+ struct AnyThread& any_thread() {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+ const struct AnyThread& any_thread() const {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+
+ ThreadChecker main_thread_checker_;
+ MainThreadOnly main_thread_only_;
+ MainThreadOnly& main_thread_only() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ return main_thread_only_;
+ }
+ const MainThreadOnly& main_thread_only() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ return main_thread_only_;
+ }
+
+ mutable Lock immediate_incoming_queue_lock_;
+ TaskDeque immediate_incoming_queue_;
+ TaskDeque& immediate_incoming_queue() {
+ immediate_incoming_queue_lock_.AssertAcquired();
+ return immediate_incoming_queue_;
+ }
+ const TaskDeque& immediate_incoming_queue() const {
+ immediate_incoming_queue_lock_.AssertAcquired();
+ return immediate_incoming_queue_;
+ }
+
+ // Protected by SequenceManagerImpl's AnyThread lock.
+ IncomingImmediateWorkList immediate_work_list_storage_;
+
+ const bool should_monitor_quiescence_;
+ const bool should_notify_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
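For readers skimming the header above, the AnyThread/MainThreadOnly split can be reduced to the following standalone C++ sketch. It is illustrative only and not part of this diff: std::mutex and assert stand in for base::Lock and DCHECK, and the class and member names are made up.

#include <cassert>
#include <mutex>
#include <thread>

// State is kept in two copies: a lock-protected copy readable from any
// thread, and a lock-free copy for the main thread. Both are written only
// from the main thread, mirroring TaskQueueImpl's AnyThread/MainThreadOnly.
class DualCopyState {
 public:
  explicit DualCopyState(int initial)
      : main_thread_id_(std::this_thread::get_id()) {
    any_thread_.value = initial;
    main_thread_only_.value = initial;
  }

  // Main thread only: update both copies, taking the lock for the shared one.
  void Set(int value) {
    assert(std::this_thread::get_id() == main_thread_id_);
    main_thread_only_.value = value;  // Lock-free fast path.
    std::lock_guard<std::mutex> guard(any_thread_lock_);
    any_thread_.value = value;
  }

  // Main thread read: no lock needed.
  int GetFromMainThread() const {
    assert(std::this_thread::get_id() == main_thread_id_);
    return main_thread_only_.value;
  }

  // Other threads must go through the lock-protected copy.
  int GetFromAnyThread() const {
    std::lock_guard<std::mutex> guard(any_thread_lock_);
    return any_thread_.value;
  }

 private:
  struct State { int value = 0; };

  const std::thread::id main_thread_id_;
  mutable std::mutex any_thread_lock_;
  State any_thread_;        // Guarded by |any_thread_lock_|.
  State main_thread_only_;  // Main-thread access only.
};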
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.cc b/chromium/base/task/sequence_manager/task_queue_selector.cc
new file mode 100644
index 00000000000..30a88bd9a98
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_selector.cc
@@ -0,0 +1,407 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_selector.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+TaskQueueSelectorLogic QueuePriorityToSelectorLogic(
+ TaskQueue::QueuePriority priority) {
+ switch (priority) {
+ case TaskQueue::kControlPriority:
+ return TaskQueueSelectorLogic::kControlPriorityLogic;
+ case TaskQueue::kHighestPriority:
+ return TaskQueueSelectorLogic::kHighestPriorityLogic;
+ case TaskQueue::kHighPriority:
+ return TaskQueueSelectorLogic::kHighPriorityLogic;
+ case TaskQueue::kNormalPriority:
+ return TaskQueueSelectorLogic::kNormalPriorityLogic;
+ case TaskQueue::kLowPriority:
+ return TaskQueueSelectorLogic::kLowPriorityLogic;
+ case TaskQueue::kBestEffortPriority:
+ return TaskQueueSelectorLogic::kBestEffortPriorityLogic;
+ default:
+ NOTREACHED();
+ return TaskQueueSelectorLogic::kCount;
+ }
+}
+
+// Helper function used to report the number of times a selector logic is
+// triggered. This will create a histogram for the enumerated data.
+void ReportTaskSelectionLogic(TaskQueueSelectorLogic selector_logic) {
+ UMA_HISTOGRAM_ENUMERATION("TaskQueueSelector.TaskServicedPerSelectorLogic",
+ selector_logic, TaskQueueSelectorLogic::kCount);
+}
+
+} // namespace
+
+TaskQueueSelector::TaskQueueSelector()
+ : prioritizing_selector_(this, "enabled"),
+ immediate_starvation_count_(0),
+ high_priority_starvation_score_(0),
+ normal_priority_starvation_score_(0),
+ low_priority_starvation_score_(0),
+ task_queue_selector_observer_(nullptr) {}
+
+TaskQueueSelector::~TaskQueueSelector() = default;
+
+void TaskQueueSelector::AddQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(queue->IsQueueEnabled());
+ prioritizing_selector_.AddQueue(queue, TaskQueue::kNormalPriority);
+}
+
+void TaskQueueSelector::RemoveQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (queue->IsQueueEnabled()) {
+ prioritizing_selector_.RemoveQueue(queue);
+ }
+}
+
+void TaskQueueSelector::EnableQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(queue->IsQueueEnabled());
+ prioritizing_selector_.AddQueue(queue, queue->GetQueuePriority());
+ if (task_queue_selector_observer_)
+ task_queue_selector_observer_->OnTaskQueueEnabled(queue);
+}
+
+void TaskQueueSelector::DisableQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(!queue->IsQueueEnabled());
+ prioritizing_selector_.RemoveQueue(queue);
+}
+
+void TaskQueueSelector::SetQueuePriority(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+ DCHECK_LT(priority, TaskQueue::kQueuePriorityCount);
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (queue->IsQueueEnabled()) {
+ prioritizing_selector_.ChangeSetIndex(queue, priority);
+ } else {
+ // Disabled queue is not in any set so we can't use ChangeSetIndex here
+ // and have to assign priority for the queue itself.
+ queue->delayed_work_queue()->AssignSetIndex(priority);
+ queue->immediate_work_queue()->AssignSetIndex(priority);
+ }
+ DCHECK_EQ(priority, queue->GetQueuePriority());
+}
+
+TaskQueue::QueuePriority TaskQueueSelector::NextPriority(
+ TaskQueue::QueuePriority priority) {
+ DCHECK(priority < TaskQueue::kQueuePriorityCount);
+ return static_cast<TaskQueue::QueuePriority>(static_cast<int>(priority) + 1);
+}
+
+TaskQueueSelector::PrioritizingSelector::PrioritizingSelector(
+ TaskQueueSelector* task_queue_selector,
+ const char* name)
+ : task_queue_selector_(task_queue_selector),
+ delayed_work_queue_sets_(TaskQueue::kQueuePriorityCount, name),
+ immediate_work_queue_sets_(TaskQueue::kQueuePriorityCount, name) {}
+
+void TaskQueueSelector::PrioritizingSelector::AddQueue(
+ internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+ DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.AddQueue(queue->delayed_work_queue(), priority);
+ immediate_work_queue_sets_.AddQueue(queue->immediate_work_queue(), priority);
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+void TaskQueueSelector::PrioritizingSelector::ChangeSetIndex(
+ internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.ChangeSetIndex(queue->delayed_work_queue(),
+ priority);
+ immediate_work_queue_sets_.ChangeSetIndex(queue->immediate_work_queue(),
+ priority);
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+void TaskQueueSelector::PrioritizingSelector::RemoveQueue(
+ internal::TaskQueueImpl* queue) {
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.RemoveQueue(queue->delayed_work_queue());
+ immediate_work_queue_sets_.RemoveQueue(queue->immediate_work_queue());
+
+#if DCHECK_IS_ON()
+ DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestImmediateTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const {
+ return immediate_work_queue_sets_.GetOldestQueueInSet(priority,
+ out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const {
+ return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestImmediateOrDelayedTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const {
+ WorkQueue* immediate_queue;
+ DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+ EnqueueOrder immediate_enqueue_order;
+ if (immediate_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+ priority, &immediate_queue, &immediate_enqueue_order)) {
+ WorkQueue* delayed_queue;
+ EnqueueOrder delayed_enqueue_order;
+ if (delayed_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+ priority, &delayed_queue, &delayed_enqueue_order)) {
+ if (immediate_enqueue_order < delayed_enqueue_order) {
+ *out_work_queue = immediate_queue;
+ } else {
+ *out_chose_delayed_over_immediate = true;
+ *out_work_queue = delayed_queue;
+ }
+ } else {
+ *out_work_queue = immediate_queue;
+ }
+ return true;
+ }
+ return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::ChooseOldestWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const {
+ // Select an immediate work queue if we are starving immediate tasks.
+ if (task_queue_selector_->immediate_starvation_count_ >=
+ kMaxDelayedStarvationTasks) {
+ if (ChooseOldestImmediateTaskWithPriority(priority, out_work_queue))
+ return true;
+ return ChooseOldestDelayedTaskWithPriority(priority, out_work_queue);
+ }
+ return ChooseOldestImmediateOrDelayedTaskWithPriority(
+ priority, out_chose_delayed_over_immediate, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::SelectWorkQueueToService(
+ TaskQueue::QueuePriority max_priority,
+ WorkQueue** out_work_queue,
+ bool* out_chose_delayed_over_immediate) {
+ DCHECK(task_queue_selector_->main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+
+ // Always service the control queue if it has any work.
+ if (max_priority > TaskQueue::kControlPriority &&
+ ChooseOldestWithPriority(TaskQueue::kControlPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(TaskQueueSelectorLogic::kControlPriorityLogic);
+ return true;
+ }
+
+ // Select from the low priority queue if we are starving it.
+ if (max_priority > TaskQueue::kLowPriority &&
+ task_queue_selector_->low_priority_starvation_score_ >=
+ kMaxLowPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kLowPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kLowPriorityStarvationLogic);
+ return true;
+ }
+
+ // Select from the normal priority queue if we are starving it.
+ if (max_priority > TaskQueue::kNormalPriority &&
+ task_queue_selector_->normal_priority_starvation_score_ >=
+ kMaxNormalPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kNormalPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kNormalPriorityStarvationLogic);
+ return true;
+ }
+
+ // Select from the high priority queue if we are starving it.
+ if (max_priority > TaskQueue::kHighPriority &&
+ task_queue_selector_->high_priority_starvation_score_ >=
+ kMaxHighPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kHighPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kHighPriorityStarvationLogic);
+ return true;
+ }
+
+ // Otherwise choose in priority order.
+ for (TaskQueue::QueuePriority priority = TaskQueue::kHighestPriority;
+ priority < max_priority; priority = NextPriority(priority)) {
+ if (ChooseOldestWithPriority(priority, out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(QueuePriorityToSelectorLogic(priority));
+ return true;
+ }
+ }
+ return false;
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+bool TaskQueueSelector::PrioritizingSelector::CheckContainsQueueForTest(
+ const internal::TaskQueueImpl* queue) const {
+ bool contains_delayed_work_queue =
+ delayed_work_queue_sets_.ContainsWorkQueueForTest(
+ queue->delayed_work_queue());
+
+ bool contains_immediate_work_queue =
+ immediate_work_queue_sets_.ContainsWorkQueueForTest(
+ queue->immediate_work_queue());
+
+ DCHECK_EQ(contains_delayed_work_queue, contains_immediate_work_queue);
+ return contains_delayed_work_queue;
+}
+#endif
+
+bool TaskQueueSelector::SelectWorkQueueToService(WorkQueue** out_work_queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ bool chose_delayed_over_immediate = false;
+ bool found_queue = prioritizing_selector_.SelectWorkQueueToService(
+ TaskQueue::kQueuePriorityCount, out_work_queue,
+ &chose_delayed_over_immediate);
+ if (!found_queue)
+ return false;
+
+ // We could use |(*out_work_queue)->task_queue()->GetQueuePriority()| here but
+ // for re-queued non-nestable tasks |task_queue()| returns null.
+ DidSelectQueueWithPriority(static_cast<TaskQueue::QueuePriority>(
+ (*out_work_queue)->work_queue_set_index()),
+ chose_delayed_over_immediate);
+ return true;
+}
+
+void TaskQueueSelector::DidSelectQueueWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool chose_delayed_over_immediate) {
+ switch (priority) {
+ case TaskQueue::kControlPriority:
+ break;
+ case TaskQueue::kHighestPriority:
+ low_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kLowPriority)
+ ? kSmallScoreIncrementForLowPriorityStarvation
+ : 0;
+ normal_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kNormalPriority)
+ ? kSmallScoreIncrementForNormalPriorityStarvation
+ : 0;
+ high_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kHighPriority)
+ ? kSmallScoreIncrementForHighPriorityStarvation
+ : 0;
+ break;
+ case TaskQueue::kHighPriority:
+ low_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kLowPriority)
+ ? kLargeScoreIncrementForLowPriorityStarvation
+ : 0;
+ normal_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kNormalPriority)
+ ? kLargeScoreIncrementForNormalPriorityStarvation
+ : 0;
+ high_priority_starvation_score_ = 0;
+ break;
+ case TaskQueue::kNormalPriority:
+ low_priority_starvation_score_ +=
+ HasTasksWithPriority(TaskQueue::kLowPriority)
+ ? kLargeScoreIncrementForLowPriorityStarvation
+ : 0;
+ normal_priority_starvation_score_ = 0;
+ break;
+ case TaskQueue::kLowPriority:
+ case TaskQueue::kBestEffortPriority:
+ low_priority_starvation_score_ = 0;
+ high_priority_starvation_score_ = 0;
+ normal_priority_starvation_score_ = 0;
+ break;
+ default:
+ NOTREACHED();
+ }
+ if (chose_delayed_over_immediate) {
+ immediate_starvation_count_++;
+ } else {
+ immediate_starvation_count_ = 0;
+ }
+}
+
+void TaskQueueSelector::AsValueInto(trace_event::TracedValue* state) const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ state->SetInteger("high_priority_starvation_score",
+ high_priority_starvation_score_);
+ state->SetInteger("normal_priority_starvation_score",
+ normal_priority_starvation_score_);
+ state->SetInteger("low_priority_starvation_score",
+ low_priority_starvation_score_);
+ state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
+}
+
+void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
+ task_queue_selector_observer_ = observer;
+}
+
+bool TaskQueueSelector::AllEnabledWorkQueuesAreEmpty() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ for (TaskQueue::QueuePriority priority = TaskQueue::kControlPriority;
+ priority < TaskQueue::kQueuePriorityCount;
+ priority = NextPriority(priority)) {
+ if (!prioritizing_selector_.delayed_work_queue_sets()->IsSetEmpty(
+ priority) ||
+ !prioritizing_selector_.immediate_work_queue_sets()->IsSetEmpty(
+ priority)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void TaskQueueSelector::SetImmediateStarvationCountForTest(
+ size_t immediate_starvation_count) {
+ immediate_starvation_count_ = immediate_starvation_count;
+}
+
+bool TaskQueueSelector::HasTasksWithPriority(
+ TaskQueue::QueuePriority priority) {
+ return !prioritizing_selector_.delayed_work_queue_sets()->IsSetEmpty(
+ priority) ||
+ !prioritizing_selector_.immediate_work_queue_sets()->IsSetEmpty(
+ priority);
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
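A back-of-the-envelope illustration of the anti-starvation accounting in DidSelectQueueWithPriority() above, as a standalone program (not part of this diff). It replays the simplest case: only highest- and low-priority work is pending, so every highest-priority selection adds the small increment to the low-priority starvation score, and low-priority work is force-selected once the score reaches the maximum declared in task_queue_selector.h.

#include <cstddef>
#include <iostream>

int main() {
  // Constants copied from task_queue_selector.h.
  const size_t kMaxLowPriorityStarvationScore = 25;
  const size_t kSmallScoreIncrementForLowPriorityStarvation = 1;

  size_t low_priority_starvation_score = 0;
  size_t highest_priority_tasks_run = 0;

  // Keep "selecting" highest-priority work until the low-priority queue is
  // considered starved and would be selected first.
  while (low_priority_starvation_score < kMaxLowPriorityStarvationScore) {
    ++highest_priority_tasks_run;
    low_priority_starvation_score +=
        kSmallScoreIncrementForLowPriorityStarvation;
  }

  // Prints 25 with the constants above.
  std::cout << "Low-priority work is force-selected after "
            << highest_priority_tasks_run << " highest-priority tasks.\n";
  return 0;
}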
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.h b/chromium/base/task/sequence_manager/task_queue_selector.h
new file mode 100644
index 00000000000..182158be3a1
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_selector.h
@@ -0,0 +1,225 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/task_queue_selector_logic.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// TaskQueueSelector is used by the SchedulerHelper to enable prioritization
+// of particular task queues.
+class BASE_EXPORT TaskQueueSelector {
+ public:
+ TaskQueueSelector();
+ ~TaskQueueSelector();
+
+ // Called to register a queue that can be selected. This function is called
+ // on the main thread.
+ void AddQueue(internal::TaskQueueImpl* queue);
+
+  // The specified queue will no longer be considered for selection. This
+  // function is called on the main thread.
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ // Make |queue| eligible for selection. This function is called on the main
+ // thread. Must only be called if |queue| is disabled.
+ void EnableQueue(internal::TaskQueueImpl* queue);
+
+ // Disable selection from |queue|. Must only be called if |queue| is enabled.
+ void DisableQueue(internal::TaskQueueImpl* queue);
+
+  // Called to set the priority of |queue|.
+ void SetQueuePriority(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+
+  // Called to choose the work queue from which the next task should be taken
+  // and run. Returns true and sets |out_work_queue| to the queue to service,
+  // or returns false if no task should be run.
+ //
+ // This function is called on the main thread.
+ bool SelectWorkQueueToService(WorkQueue** out_work_queue);
+
+ // Serialize the selector state for tracing.
+ void AsValueInto(trace_event::TracedValue* state) const;
+
+ class BASE_EXPORT Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Called when |queue| transitions from disabled to enabled.
+ virtual void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) = 0;
+ };
+
+ // Called once to set the Observer. This function is called
+ // on the main thread. If |observer| is null, then no callbacks will occur.
+ void SetTaskQueueSelectorObserver(Observer* observer);
+
+ // Returns true if all the enabled work queues are empty. Returns false
+ // otherwise.
+ bool AllEnabledWorkQueuesAreEmpty() const;
+
+ protected:
+ class BASE_EXPORT PrioritizingSelector {
+ public:
+ PrioritizingSelector(TaskQueueSelector* task_queue_selector,
+ const char* name);
+
+ void ChangeSetIndex(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void AddQueue(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ bool SelectWorkQueueToService(TaskQueue::QueuePriority max_priority,
+ WorkQueue** out_work_queue,
+ bool* out_chose_delayed_over_immediate);
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return &delayed_work_queue_sets_;
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return &immediate_work_queue_sets_;
+ }
+
+ const WorkQueueSets* delayed_work_queue_sets() const {
+ return &delayed_work_queue_sets_;
+ }
+ const WorkQueueSets* immediate_work_queue_sets() const {
+ return &immediate_work_queue_sets_;
+ }
+
+ bool ChooseOldestWithPriority(TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+ bool CheckContainsQueueForTest(const internal::TaskQueueImpl* queue) const;
+#endif
+
+ private:
+ bool ChooseOldestImmediateTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+ bool ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+    // Returns true if |out_work_queue| contains the queue with the oldest
+    // pending task from the set of queues of |priority|, or false if all
+    // queues of that priority are empty. In addition,
+    // |out_chose_delayed_over_immediate| is set to true iff a delayed work
+    // queue was chosen over an immediate work queue.
+ bool ChooseOldestImmediateOrDelayedTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+ const TaskQueueSelector* task_queue_selector_;
+ WorkQueueSets delayed_work_queue_sets_;
+ WorkQueueSets immediate_work_queue_sets_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrioritizingSelector);
+ };
+
+  // Sets the count of how many times a delayed task has been chosen over an
+  // immediate one, for testing. Once this count reaches
+  // kMaxDelayedStarvationTasks the selector force-selects an immediate task.
+ void SetImmediateStarvationCountForTest(size_t immediate_starvation_count);
+
+ PrioritizingSelector* prioritizing_selector_for_test() {
+ return &prioritizing_selector_;
+ }
+
+ // Maximum score to accumulate before high priority tasks are run even in
+ // the presence of highest priority tasks.
+ static const size_t kMaxHighPriorityStarvationScore = 3;
+
+ // Increment to be applied to the high priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the high priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForHighPriorityStarvation = 1;
+
+ // Maximum score to accumulate before normal priority tasks are run even in
+ // the presence of higher priority tasks i.e. highest and high priority tasks.
+ static const size_t kMaxNormalPriorityStarvationScore = 5;
+
+  // Increment to be applied to the normal priority starvation score when a task
+  // should have a large effect on the score. E.g. only a few high priority
+  // tasks must run before the normal priority queue is considered
+  // starved.
+ static const size_t kLargeScoreIncrementForNormalPriorityStarvation = 2;
+
+ // Increment to be applied to the normal priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the normal priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForNormalPriorityStarvation = 1;
+
+ // Maximum score to accumulate before low priority tasks are run even in the
+ // presence of highest, high, or normal priority tasks.
+ static const size_t kMaxLowPriorityStarvationScore = 25;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have a large effect on the score. E.g. Only a few normal/high
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kLargeScoreIncrementForLowPriorityStarvation = 5;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have only a small effect on the score. E.g. A lot of highest
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForLowPriorityStarvation = 1;
+
+  // Maximum number of delayed tasks which can be run while there's a
+  // waiting non-delayed task.
+ static const size_t kMaxDelayedStarvationTasks = 3;
+
+ private:
+ // Returns the priority which is next after |priority|.
+ static TaskQueue::QueuePriority NextPriority(
+ TaskQueue::QueuePriority priority);
+
+ bool SelectWorkQueueToServiceInternal(WorkQueue** out_work_queue);
+
+ // Called whenever the selector chooses a task queue for execution with the
+ // priority |priority|.
+ void DidSelectQueueWithPriority(TaskQueue::QueuePriority priority,
+ bool chose_delayed_over_immediate);
+
+ // Returns true if there are pending tasks with priority |priority|.
+ bool HasTasksWithPriority(TaskQueue::QueuePriority priority);
+
+ ThreadChecker main_thread_checker_;
+
+ PrioritizingSelector prioritizing_selector_;
+ size_t immediate_starvation_count_;
+ size_t high_priority_starvation_score_;
+ size_t normal_priority_starvation_score_;
+ size_t low_priority_starvation_score_;
+
+ Observer* task_queue_selector_observer_; // Not owned.
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueSelector);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
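The priority-order fallback at the end of SelectWorkQueueToService() can be pictured with the short standalone sketch below (not part of this diff). One std::deque per priority stands in for a WorkQueueSets and the starvation scoring is omitted; selection simply walks the priorities from highest to lowest and services the first non-empty set.

#include <array>
#include <deque>
#include <iostream>
#include <string>

enum QueuePriority {
  kControlPriority,
  kHighestPriority,
  kHighPriority,
  kNormalPriority,
  kLowPriority,
  kBestEffortPriority,
  kQueuePriorityCount,
};

int main() {
  // One "work queue set" of pending task names per priority.
  std::array<std::deque<std::string>, kQueuePriorityCount> sets;
  sets[kNormalPriority].push_back("normal task");
  sets[kBestEffortPriority].push_back("best-effort task");

  // Walk the sets in priority order and service the first non-empty one.
  for (int priority = kControlPriority; priority < kQueuePriorityCount;
       ++priority) {
    if (!sets[priority].empty()) {
      std::cout << "Selected: " << sets[priority].front() << "\n";
      break;  // The real selector runs one task and then re-selects.
    }
  }
  return 0;
}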
diff --git a/chromium/base/task/sequence_manager/task_queue_selector_logic.h b/chromium/base/task/sequence_manager/task_queue_selector_logic.h
new file mode 100644
index 00000000000..8cf8933783e
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_selector_logic.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// Used to describe the logic triggered when a task queue is selected for
+// servicing.
+// This enum is used for histograms and should not be renumbered.
+enum class TaskQueueSelectorLogic {
+
+ // Selected due to priority rules.
+ kControlPriorityLogic = 0,
+ kHighestPriorityLogic = 1,
+ kHighPriorityLogic = 2,
+ kNormalPriorityLogic = 3,
+ kLowPriorityLogic = 4,
+ kBestEffortPriorityLogic = 5,
+
+ // Selected due to starvation logic.
+ kHighPriorityStarvationLogic = 6,
+ kNormalPriorityStarvationLogic = 7,
+ kLowPriorityStarvationLogic = 8,
+
+ kCount = 9,
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
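Because TaskQueueSelectorLogic values are recorded into a UMA histogram (see ReportTaskSelectionLogic() in task_queue_selector.cc), they must stay stable and kCount must remain the exclusive upper bound. Below is a minimal standalone sketch of that bucket-counting convention, using a plain array instead of Chromium's histogram machinery (illustrative only, not part of this diff).

#include <array>
#include <cstddef>
#include <iostream>

// Append-only values with an explicit kCount bound, mirroring the enum above.
enum class SelectorLogic : int {
  kControlPriorityLogic = 0,
  kNormalPriorityLogic = 3,
  kCount = 9,
};

int main() {
  // One bucket per possible value, sized by the exclusive upper bound kCount.
  std::array<int, static_cast<size_t>(SelectorLogic::kCount)> buckets{};

  const auto record = [&buckets](SelectorLogic logic) {
    buckets[static_cast<size_t>(logic)]++;
  };
  record(SelectorLogic::kNormalPriorityLogic);
  record(SelectorLogic::kNormalPriorityLogic);

  std::cout << "Normal-priority selections: "
            << buckets[static_cast<size_t>(SelectorLogic::kNormalPriorityLogic)]
            << "\n";
  return 0;
}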
diff --git a/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
new file mode 100644
index 00000000000..c3742a2b2ef
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
@@ -0,0 +1,885 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_selector.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+// To avoid symbol collisions in jumbo builds.
+namespace task_queue_selector_unittest {
+
+class MockObserver : public TaskQueueSelector::Observer {
+ public:
+ MockObserver() = default;
+ ~MockObserver() override = default;
+
+ MOCK_METHOD1(OnTaskQueueEnabled, void(internal::TaskQueueImpl*));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockObserver);
+};
+
+class TaskQueueSelectorForTest : public TaskQueueSelector {
+ public:
+ using TaskQueueSelector::prioritizing_selector_for_test;
+ using TaskQueueSelector::PrioritizingSelector;
+ using TaskQueueSelector::SetImmediateStarvationCountForTest;
+
+  // Returns the number of highest priority tasks needed to starve high
+  // priority tasks.
+ static constexpr size_t NumberOfHighestPriorityToStarveHighPriority() {
+ return (kMaxHighPriorityStarvationScore +
+ kSmallScoreIncrementForHighPriorityStarvation - 1) /
+ kSmallScoreIncrementForHighPriorityStarvation;
+ }
+
+ // Returns the number of highest priority tasks needed to starve normal
+ // priority tasks.
+ static constexpr size_t NumberOfHighestPriorityToStarveNormalPriority() {
+ return (kMaxNormalPriorityStarvationScore +
+ kSmallScoreIncrementForNormalPriorityStarvation - 1) /
+ kSmallScoreIncrementForNormalPriorityStarvation;
+ }
+
+ // Returns the number of high priority tasks needed to starve normal priority
+ // tasks.
+ static constexpr size_t NumberOfHighPriorityToStarveNormalPriority() {
+ return (kMaxNormalPriorityStarvationScore +
+ kLargeScoreIncrementForNormalPriorityStarvation - 1) /
+ kLargeScoreIncrementForNormalPriorityStarvation;
+ }
+
+ // Returns the number of highest priority tasks needed to starve low priority
+ // ones.
+ static constexpr size_t NumberOfHighestPriorityToStarveLowPriority() {
+ return (kMaxLowPriorityStarvationScore +
+ kSmallScoreIncrementForLowPriorityStarvation - 1) /
+ kSmallScoreIncrementForLowPriorityStarvation;
+ }
+
+ // Returns the number of high/normal priority tasks needed to starve low
+ // priority ones.
+ static constexpr size_t NumberOfHighAndNormalPriorityToStarveLowPriority() {
+ return (kMaxLowPriorityStarvationScore +
+ kLargeScoreIncrementForLowPriorityStarvation - 1) /
+ kLargeScoreIncrementForLowPriorityStarvation;
+ }
+};
+
+class TaskQueueSelectorTest : public testing::Test {
+ public:
+ TaskQueueSelectorTest()
+ : test_closure_(BindRepeating(&TaskQueueSelectorTest::TestFunction)) {}
+ ~TaskQueueSelectorTest() override = default;
+
+ TaskQueueSelectorForTest::PrioritizingSelector* prioritizing_selector() {
+ return selector_.prioritizing_selector_for_test();
+ }
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return prioritizing_selector()->delayed_work_queue_sets();
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return prioritizing_selector()->immediate_work_queue_sets();
+ }
+
+ void PushTasks(const size_t queue_indices[], size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ EnqueueOrder::Generator enqueue_order_generator;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ enqueue_order_generator.GenerateNext()));
+ }
+ }
+
+ void PushTasksWithEnqueueOrder(const size_t queue_indices[],
+ const size_t enqueue_orders[],
+ size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(enqueue_orders[i])));
+ }
+ }
+
+ std::vector<size_t> PopTasks() {
+ std::vector<size_t> order;
+ WorkQueue* chosen_work_queue;
+ while (selector_.SelectWorkQueueToService(&chosen_work_queue)) {
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ order.push_back(chosen_queue_index);
+ chosen_work_queue->PopTaskForTesting();
+ immediate_work_queue_sets()->OnPopQueue(chosen_work_queue);
+ }
+ return order;
+ }
+
+ static void TestFunction() {}
+
+ protected:
+ void SetUp() final {
+ time_domain_ = std::make_unique<MockTimeDomain>(TimeTicks() +
+ TimeDelta::FromSeconds(1));
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ std::unique_ptr<TaskQueueImpl> task_queue =
+ std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+ selector_.AddQueue(task_queue.get());
+ task_queues_.push_back(std::move(task_queue));
+ }
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[i]->GetQueuePriority())
+ << i;
+ queue_to_index_map_.insert(std::make_pair(task_queues_[i].get(), i));
+ }
+ histogram_tester_.reset(new HistogramTester());
+ }
+
+ void TearDown() final {
+ for (std::unique_ptr<TaskQueueImpl>& task_queue : task_queues_) {
+ // Note since this test doesn't have a SequenceManager we need to
+ // manually remove |task_queue| from the |selector_|. Normally
+ // UnregisterTaskQueue would do that.
+ selector_.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ }
+ }
+
+ std::unique_ptr<TaskQueueImpl> NewTaskQueueWithBlockReporting() {
+ return std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+ }
+
+ const size_t kTaskQueueCount = 5;
+ RepeatingClosure test_closure_;
+ TaskQueueSelectorForTest selector_;
+ std::unique_ptr<TimeDomain> time_domain_;
+ std::vector<std::unique_ptr<TaskQueueImpl>> task_queues_;
+ std::map<TaskQueueImpl*, size_t> queue_to_index_map_;
+ std::unique_ptr<HistogramTester> histogram_tester_;
+};
+
+TEST_F(TaskQueueSelectorTest, TestDefaultPriority) {
+ size_t queue_order[] = {4, 3, 2, 1, 0};
+ PushTasks(queue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4, 3, 2, 1, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kNormalPriorityLogic)),
+ 5);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighestPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 1, 3, 4, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestLowPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3, 4, 2));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kLowPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(3, 1, 4, 2, 0));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kBestEffortPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[4].get(),
+ TaskQueue::kControlPriority);
+ EXPECT_EQ(TaskQueue::kControlPriority, task_queues_[4]->GetQueuePriority());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(4, 2, 0, 1, 3));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kControlPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithEnabledQueue) {
+ task_queues_[1]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[1].get());
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+ task_queues_[1]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[1].get());
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestObserverWithSetQueuePriorityAndQueueAlreadyEnabled) {
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(0);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableEnable) {
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[2].get());
+ task_queues_[4]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[4].get());
+ // Disabling a queue should not affect its priority.
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[4]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3));
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+ task_queues_[2]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[2].get());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(2));
+ task_queues_[4]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[4].get());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableChangePriorityThenEnable) {
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_TRUE(task_queues_[2]->immediate_work_queue()->Empty());
+
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_FALSE(task_queues_[2]->immediate_work_queue()->Empty());
+ task_queues_[2]->SetQueueEnabledForTest(true);
+
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestEmptyQueues) {
+ WorkQueue* chosen_work_queue = nullptr;
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Test only disabled queues.
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+ task_queues_[0]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[0].get());
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // These tests are unusual since there's no TQM. To avoid a later DCHECK when
+ // deleting the task queue, we re-enable the queue here so the selector
+ // doesn't get out of sync.
+ task_queues_[0]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[0].get());
+}
+
+TEST_F(TaskQueueSelectorTest, TestAge) {
+ size_t enqueue_order[] = {10, 1, 2, 9, 4};
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasksWithEnqueueOrder(queue_order, enqueue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(1, 2, 4, 3, 0));
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlStarvesOthers) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kControlPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[3].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHigh) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[1], 0ul); // Check highest doesn't starve high.
+ EXPECT_GT(counts[0], counts[1]); // Check highest gets more chance to run.
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHighOrNormal) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestHighestPriorityDoesNotStarveHighOrNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[2], counts[3]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[3], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormal) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+  // Check normal isn't starved.
+ EXPECT_GT(counts[1], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[1], counts[2]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestNormalPriorityDoesNotStarveLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[0], 0ul); // Check normal doesn't starve low.
+ EXPECT_GT(counts[2], counts[0]); // Check normal gets more chance to run.
+ EXPECT_EQ(0ul, counts[1]); // Check best effort is starved.
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortGetsStarved) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[1]->GetQueuePriority());
+
+ // Check that normal priority tasks starve best effort.
+ WorkQueue* chosen_work_queue = nullptr;
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that highest priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that high priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that low priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kLowPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that control priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kControlPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestHighPriorityStarvationScoreIncreasedOnlyWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve high priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighestPriorityToStarveHighPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a high priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the high priority task is not considered starved, and thus isn't
+ // processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest,
+       TestNormalPriorityStarvationScoreIncreasedOnlyWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve normal priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <= TaskQueueSelectorForTest::
+ NumberOfHighestPriorityToStarveNormalPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ // Run a number of high priority tasks needed to starve normal priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighPriorityToStarveNormalPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a normal priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the normal priority task is not considered starved, and thus
+ // isn't processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestLowPriorityTaskStarvationOnlyIncreasedWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve low priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighestPriorityToStarveLowPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+
+ // Run a number of high/normal priority tasks needed to starve low priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <= TaskQueueSelectorForTest::
+ NumberOfHighAndNormalPriorityToStarveLowPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a low priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kLowPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the low priority task is not considered starved, and thus
+ // isn't processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty) {
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+ PopTasks();
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty_ControlPriority) {
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kControlPriority);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_Empty) {
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_FALSE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyDelayed) {
+ task_queues_[0]->delayed_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->delayed_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyImmediate) {
+ task_queues_[0]->immediate_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->immediate_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+
+ TaskQueueImpl::Task task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(2));
+ task_queue->immediate_work_queue()->Push(std::move(task));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithTwoBlockedQueues) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ std::unique_ptr<TaskQueueImpl> task_queue2(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+ selector.AddQueue(task_queue2.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ task_queue2->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+ selector.DisableQueue(task_queue2.get());
+
+ selector.SetQueuePriority(task_queue2.get(), TaskQueue::kControlPriority);
+
+ TaskQueueImpl::Task task1(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder::FromIntForTesting(2),
+ EnqueueOrder::FromIntForTesting(2));
+ TaskQueueImpl::Task task2(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder::FromIntForTesting(3),
+ EnqueueOrder::FromIntForTesting(3));
+ task_queue->immediate_work_queue()->Push(std::move(task1));
+ task_queue2->immediate_work_queue()->Push(std::move(task2));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+ testing::Mock::VerifyAndClearExpectations(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue2->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue2.get());
+ selector.RemoveQueue(task_queue2.get());
+ task_queue2->UnregisterTaskQueue();
+}
+
+struct ChooseOldestWithPriorityTestParam {
+ int delayed_task_enqueue_order;
+ int immediate_task_enqueue_order;
+ int immediate_starvation_count;
+ const char* expected_work_queue_name;
+ bool expected_did_starve_immediate_queue;
+};
+
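+// Each row below gives the enqueue orders of a delayed and an immediate task,
+// the pre-set immediate starvation count, the work queue expected to be
+// chosen, and whether the delayed queue was expected to be chosen over the
+// immediate one.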
+static const ChooseOldestWithPriorityTestParam
+ kChooseOldestWithPriorityTestCases[] = {
+ {1, 2, 0, "delayed", true}, {1, 2, 1, "delayed", true},
+ {1, 2, 2, "delayed", true}, {1, 2, 3, "immediate", false},
+ {1, 2, 4, "immediate", false}, {2, 1, 4, "immediate", false},
+ {2, 1, 4, "immediate", false},
+};
+
+class ChooseOldestWithPriorityTest
+ : public TaskQueueSelectorTest,
+ public testing::WithParamInterface<ChooseOldestWithPriorityTestParam> {};
+
+TEST_P(ChooseOldestWithPriorityTest, RoundRobinTest) {
+ task_queues_[0]->immediate_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder::FromIntForTesting(GetParam().immediate_task_enqueue_order),
+ EnqueueOrder::FromIntForTesting(
+ GetParam().immediate_task_enqueue_order)));
+
+ task_queues_[0]->delayed_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder::FromIntForTesting(GetParam().delayed_task_enqueue_order),
+ EnqueueOrder::FromIntForTesting(GetParam().delayed_task_enqueue_order)));
+
+ selector_.SetImmediateStarvationCountForTest(
+ GetParam().immediate_starvation_count);
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue->task_queue(), task_queues_[0].get());
+ EXPECT_STREQ(chosen_work_queue->name(), GetParam().expected_work_queue_name);
+ EXPECT_EQ(chose_delayed_over_immediate,
+ GetParam().expected_did_starve_immediate_queue);
+}
+
+INSTANTIATE_TEST_CASE_P(ChooseOldestWithPriorityTest,
+ ChooseOldestWithPriorityTest,
+ testing::ValuesIn(kChooseOldestWithPriorityTestCases));
+
+} // namespace task_queue_selector_unittest
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/task_time_observer.h b/chromium/base/task/sequence_manager/task_time_observer.h
new file mode 100644
index 00000000000..151a94119bb
--- /dev/null
+++ b/chromium/base/task/sequence_manager/task_time_observer.h
@@ -0,0 +1,32 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
+
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+// TaskTimeObserver provides an API for observing completion of tasks.
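+//
+// For illustration only (not part of this interface), an implementation might
+// accumulate per-task wall time, e.g.:
+//   void DidProcessTask(TimeTicks start_time, TimeTicks end_time) override {
+//     total_task_time_ += end_time - start_time;  // hypothetical member
+//   }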
+class TaskTimeObserver {
+ public:
+ TaskTimeObserver() = default;
+ virtual ~TaskTimeObserver() = default;
+
+  // To be called when a task is about to start.
+ virtual void WillProcessTask(TimeTicks start_time) = 0;
+
+  // To be called when a task has completed.
+ virtual void DidProcessTask(TimeTicks start_time, TimeTicks end_time) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskTimeObserver);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
diff --git a/chromium/base/task/sequence_manager/thread_controller.h b/chromium/base/task/sequence_manager/thread_controller.h
index 67ad54596c4..539530602bb 100644
--- a/chromium/base/task/sequence_manager/thread_controller.h
+++ b/chromium/base/task/sequence_manager/thread_controller.h
@@ -20,7 +20,7 @@ namespace internal {
class SequencedTaskSource;
-// Implementation of this interface is used by TaskQueueManager to schedule
+// Implementation of this interface is used by SequenceManager to schedule
// actual work to be run. Hopefully we can stop using MessageLoop and this
// interface will become more concise.
class ThreadController {
@@ -32,8 +32,10 @@ class ThreadController {
// main message loop.
virtual void SetWorkBatchSize(int work_batch_size = 1) = 0;
- // Notifies that |pending_task| was enqueued. Needed for tracing purposes.
- virtual void DidQueueTask(const PendingTask& pending_task) = 0;
+ // Notifies that |pending_task| is about to be enqueued. Needed for tracing
+  // purposes. The impl may use this opportunity to add metadata to
+  // |pending_task| before it is moved into the queue.
+ virtual void WillQueueTask(PendingTask* pending_task) = 0;
// Notify the controller that its associated sequence has immediate work
// to run. Shortly after this is called, the thread associated with this
@@ -51,6 +53,7 @@ class ThreadController {
// scheduled delayed work. Can only be called from the main sequence.
// NOTE: DelayTillNextTask might return a different value as it also takes
// immediate work into account.
+ // TODO(kraynov): Remove |lazy_now| parameter.
virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) = 0;
// Sets the sequenced task source from which to take tasks after
@@ -59,7 +62,7 @@ class ThreadController {
virtual void SetSequencedTaskSource(SequencedTaskSource*) = 0;
// TODO(altimin): Get rid of the methods below.
- // These methods exist due to current integration of TaskQueueManager
+ // These methods exist due to current integration of SequenceManager
// with MessageLoop.
virtual bool RunsTasksInCurrentSequence() = 0;
diff --git a/chromium/base/task/sequence_manager/thread_controller_impl.cc b/chromium/base/task/sequence_manager/thread_controller_impl.cc
new file mode 100644
index 00000000000..0d66af434d2
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_impl.cc
@@ -0,0 +1,273 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_impl.h"
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+ThreadControllerImpl::ThreadControllerImpl(
+ MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* time_source)
+ : message_loop_(message_loop),
+ task_runner_(task_runner),
+ message_loop_task_runner_(message_loop ? message_loop->task_runner()
+ : nullptr),
+ time_source_(time_source),
+ weak_factory_(this) {
+ immediate_do_work_closure_ =
+ BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ WorkType::kImmediate);
+ delayed_do_work_closure_ =
+ BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ WorkType::kDelayed);
+}
+
+ThreadControllerImpl::~ThreadControllerImpl() = default;
+
+ThreadControllerImpl::AnySequence::AnySequence() = default;
+
+ThreadControllerImpl::AnySequence::~AnySequence() = default;
+
+ThreadControllerImpl::MainSequenceOnly::MainSequenceOnly() = default;
+
+ThreadControllerImpl::MainSequenceOnly::~MainSequenceOnly() = default;
+
+std::unique_ptr<ThreadControllerImpl> ThreadControllerImpl::Create(
+ MessageLoop* message_loop,
+ const TickClock* time_source) {
+ return WrapUnique(new ThreadControllerImpl(
+ message_loop, message_loop->task_runner(), time_source));
+}
+
+void ThreadControllerImpl::SetSequencedTaskSource(
+ SequencedTaskSource* sequence) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence);
+ DCHECK(!sequence_);
+ sequence_ = sequence;
+}
+
+void ThreadControllerImpl::ScheduleWork() {
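+  // Unlike SetNextDelayedDoWork() below, there is no sequence checker here;
+  // the shared state is guarded by |any_sequence_lock_|, which allows this to
+  // be called from other sequences.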
+ DCHECK(sequence_);
+ AutoLock lock(any_sequence_lock_);
+ // Don't post a DoWork if there's an immediate DoWork in flight or if we're
+ // inside a top level DoWork. We can rely on a continuation being posted as
+ // needed.
+ if (any_sequence().immediate_do_work_posted ||
+ (any_sequence().do_work_running_count > any_sequence().nesting_depth)) {
+ return;
+ }
+ any_sequence().immediate_do_work_posted = true;
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::ScheduleWork::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+}
+
+void ThreadControllerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
+ TimeTicks run_time) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ if (main_sequence_only().next_delayed_do_work == run_time)
+ return;
+
+ // Cancel DoWork if it was scheduled and we set an "infinite" delay now.
+ if (run_time == TimeTicks::Max()) {
+ cancelable_delayed_do_work_closure_.Cancel();
+ main_sequence_only().next_delayed_do_work = TimeTicks::Max();
+ return;
+ }
+
+ // If DoWork is running then we don't need to do anything because it will post
+ // a continuation as needed. Bailing out here is by far the most common case.
+ if (main_sequence_only().do_work_running_count >
+ main_sequence_only().nesting_depth) {
+ return;
+ }
+
+ // If DoWork is about to run then we also don't need to do anything.
+ {
+ AutoLock lock(any_sequence_lock_);
+ if (any_sequence().immediate_do_work_posted)
+ return;
+ }
+
+ base::TimeDelta delay = std::max(TimeDelta(), run_time - lazy_now->Now());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::SetNextDelayedDoWork::PostDelayedTask",
+ "delay_ms", delay.InMillisecondsF());
+
+ main_sequence_only().next_delayed_do_work = run_time;
+ // Reset also causes cancellation of the previous DoWork task.
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
+}
+
+bool ThreadControllerImpl::RunsTasksInCurrentSequence() {
+ return task_runner_->RunsTasksInCurrentSequence();
+}
+
+const TickClock* ThreadControllerImpl::GetClock() {
+ return time_source_;
+}
+
+void ThreadControllerImpl::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(task_runner);
+}
+
+void ThreadControllerImpl::RestoreDefaultTaskRunner() {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(message_loop_task_runner_);
+}
+
+void ThreadControllerImpl::WillQueueTask(PendingTask* pending_task) {
+ task_annotator_.WillQueueTask("SequenceManager::PostTask", pending_task);
+}
+
+void ThreadControllerImpl::DoWork(WorkType work_type) {
+ TRACE_EVENT0("sequence_manager", "ThreadControllerImpl::DoWork");
+
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ {
+ AutoLock lock(any_sequence_lock_);
+ if (work_type == WorkType::kImmediate)
+ any_sequence().immediate_do_work_posted = false;
+ any_sequence().do_work_running_count++;
+ }
+
+ main_sequence_only().do_work_running_count++;
+
+ WeakPtr<ThreadControllerImpl> weak_ptr = weak_factory_.GetWeakPtr();
+ // TODO(scheduler-dev): Consider moving to a time based work batch instead.
+ for (int i = 0; i < main_sequence_only().work_batch_size_; i++) {
+ Optional<PendingTask> task = sequence_->TakeTask();
+ if (!task)
+ break;
+
+ {
+ TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
+ task_annotator_.RunTask("ThreadControllerImpl::RunTask", &*task);
+ }
+
+ if (!weak_ptr)
+ return;
+
+ sequence_->DidRunTask();
+
+ // NOTE: https://crbug.com/828835.
+ // When we're running inside a nested RunLoop it may quit anytime, so any
+ // outstanding pending tasks must run in the outer RunLoop
+ // (see SequenceManagerTestWithMessageLoop.QuitWhileNested test).
+    // Unfortunately, it's the MessageLoop that's receiving that signal, and we
+    // can't know about it before we return from DoWork; hence OnExitNestedRunLoop
+ // will be called later. Since we must implement ThreadController and
+ // SequenceManager in conformance with MessageLoop task runners, we need
+ // to disable this batching optimization while nested.
+ // Implementing RunLoop::Delegate ourselves will help to resolve this issue.
+ if (main_sequence_only().nesting_depth > 0)
+ break;
+ }
+
+ main_sequence_only().do_work_running_count--;
+
+ {
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().do_work_running_count--;
+ DCHECK_GE(any_sequence().do_work_running_count, 0);
+ LazyNow lazy_now(time_source_);
+ TimeDelta delay_till_next_task = sequence_->DelayTillNextTask(&lazy_now);
+ if (delay_till_next_task <= TimeDelta()) {
+ // The next task needs to run immediately, post a continuation if needed.
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ } else if (delay_till_next_task < TimeDelta::Max()) {
+ // The next task needs to run after a delay, post a continuation if
+ // needed.
+ TimeTicks next_task_at = lazy_now.Now() + delay_till_next_task;
+ if (next_task_at != main_sequence_only().next_delayed_do_work) {
+ main_sequence_only().next_delayed_do_work = next_task_at;
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(),
+ delay_till_next_task);
+ }
+ } else {
+ // There is no next task scheduled.
+ main_sequence_only().next_delayed_do_work = TimeTicks::Max();
+ }
+ }
+}
+
+void ThreadControllerImpl::AddNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ nesting_observer_ = observer;
+ RunLoop::AddNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::RemoveNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_EQ(observer, nesting_observer_);
+ nesting_observer_ = nullptr;
+ RunLoop::RemoveNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::OnBeginNestedRunLoop() {
+ main_sequence_only().nesting_depth++;
+ {
+ // We just entered a nested run loop, make sure there's a DoWork posted or
+ // the system will grind to a halt.
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth++;
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::OnBeginNestedRunLoop::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnBeginNestedRunLoop();
+}
+
+void ThreadControllerImpl::OnExitNestedRunLoop() {
+ main_sequence_only().nesting_depth--;
+ {
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth--;
+ DCHECK_GE(any_sequence().nesting_depth, 0);
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnExitNestedRunLoop();
+}
+
+void ThreadControllerImpl::SetWorkBatchSize(int work_batch_size) {
+ main_sequence_only().work_batch_size_ = work_batch_size;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_impl.h b/chromium/base/task/sequence_manager/thread_controller_impl.h
new file mode 100644
index 00000000000..794feefb4bf
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_impl.h
@@ -0,0 +1,130 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
+
+#include "base/cancelable_callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
+#include "base/sequence_checker.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/thread_controller.h"
+
+namespace base {
+
+// TODO(kraynov): https://crbug.com/828835
+// Consider going away from using MessageLoop in the renderer process.
+class MessageLoop;
+
+namespace sequence_manager {
+namespace internal {
+
+// TODO(kraynov): Rename to ThreadControllerWithMessageLoopImpl.
+class BASE_EXPORT ThreadControllerImpl : public ThreadController,
+ public RunLoop::NestingObserver {
+ public:
+ ~ThreadControllerImpl() override;
+
+ static std::unique_ptr<ThreadControllerImpl> Create(
+ MessageLoop* message_loop,
+ const TickClock* time_source);
+
+ // ThreadController:
+ void SetWorkBatchSize(int work_batch_size) override;
+ void WillQueueTask(PendingTask* pending_task) override;
+ void ScheduleWork() override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ void SetSequencedTaskSource(SequencedTaskSource* sequence) override;
+ bool RunsTasksInCurrentSequence() override;
+ const TickClock* GetClock() override;
+ void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) override;
+ void RestoreDefaultTaskRunner() override;
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
+
+ // RunLoop::NestingObserver:
+ void OnBeginNestedRunLoop() override;
+ void OnExitNestedRunLoop() override;
+
+ protected:
+ ThreadControllerImpl(MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* time_source);
+
+ // TODO(altimin): Make these const. Blocked on removing
+  // lazy initialization support.
+ MessageLoop* message_loop_;
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ RunLoop::NestingObserver* nesting_observer_ = nullptr;
+
+ private:
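+  // kImmediate work is posted via |immediate_do_work_closure_|; kDelayed work
+  // via |delayed_do_work_closure_| wrapped in
+  // |cancelable_delayed_do_work_closure_|.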
+ enum class WorkType { kImmediate, kDelayed };
+
+ void DoWork(WorkType work_type);
+
+ struct AnySequence {
+ AnySequence();
+ ~AnySequence();
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ bool immediate_do_work_posted = false;
+ };
+
+ mutable Lock any_sequence_lock_;
+ AnySequence any_sequence_;
+
+ struct AnySequence& any_sequence() {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+ const struct AnySequence& any_sequence() const {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+
+ struct MainSequenceOnly {
+ MainSequenceOnly();
+ ~MainSequenceOnly();
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ int work_batch_size_ = 1;
+
+ TimeTicks next_delayed_do_work = TimeTicks::Max();
+ };
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ MainSequenceOnly main_sequence_only_;
+ MainSequenceOnly& main_sequence_only() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+ const MainSequenceOnly& main_sequence_only() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> message_loop_task_runner_;
+ const TickClock* time_source_;
+ RepeatingClosure immediate_do_work_closure_;
+ RepeatingClosure delayed_do_work_closure_;
+ CancelableClosure cancelable_delayed_do_work_closure_;
+ SequencedTaskSource* sequence_ = nullptr; // Not owned.
+ debug::TaskAnnotator task_annotator_;
+
+ WeakPtrFactory<ThreadControllerImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
new file mode 100644
index 00000000000..fbed88b404b
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -0,0 +1,205 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+
+#include "base/auto_reset.h"
+#include "base/message_loop/message_pump_default.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
+ TickClock* time_source)
+ : main_thread_id_(PlatformThread::CurrentId()),
+ pump_(new MessagePumpDefault()),
+ time_source_(time_source) {
+ RunLoop::RegisterDelegateForCurrentThread(this);
+}
+
+ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
+ // Destructors of RunLoop::Delegate and ThreadTaskRunnerHandle
+ // will do all the clean-up.
+}
+
+ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;
+
+ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
+ default;
+
+void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
+ SequencedTaskSource* task_source) {
+ DCHECK(task_source);
+ DCHECK(!main_thread_only().task_source);
+ main_thread_only().task_source = task_source;
+}
+
+void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
+ int work_batch_size) {
+ DCHECK_GE(work_batch_size, 1);
+ main_thread_only().batch_size = work_batch_size;
+}
+
+void ThreadControllerWithMessagePumpImpl::WillQueueTask(
+ PendingTask* pending_task) {
+ task_annotator_.WillQueueTask("ThreadController::Task", pending_task);
+}
+
+void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
+ // Continuation will be posted if necessary.
+ if (RunsTasksInCurrentSequence() && is_doing_work())
+ return;
+
+ pump_->ScheduleWork();
+}
+
+void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
+ LazyNow* lazy_now,
+ TimeTicks run_time) {
+ if (main_thread_only().next_delayed_work == run_time)
+ return;
+ main_thread_only().next_delayed_work = run_time;
+
+ if (run_time == TimeTicks::Max())
+ return;
+
+ // Continuation will be posted if necessary.
+ if (is_doing_work())
+ return;
+
+  // The |lazy_now| parameter will be removed from this method soon.
+ DCHECK_LT(time_source_->NowTicks(), run_time);
+ pump_->ScheduleDelayedWork(run_time);
+}
+
+const TickClock* ThreadControllerWithMessagePumpImpl::GetClock() {
+ return time_source_;
+}
+
+bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
+ return main_thread_id_ == PlatformThread::CurrentId();
+}
+
+void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ main_thread_only().thread_task_runner_handle =
+ std::make_unique<ThreadTaskRunnerHandle>(task_runner);
+}
+
+void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
+  // Unlike with the MessageLoop, there's no default task runner to restore.
+ main_thread_only().thread_task_runner_handle.reset();
+}
+
+void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_LE(main_thread_only().run_depth, 1);
+ DCHECK(!main_thread_only().nesting_observer);
+ DCHECK(observer);
+ main_thread_only().nesting_observer = observer;
+}
+
+void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_EQ(main_thread_only().nesting_observer, observer);
+ main_thread_only().nesting_observer = nullptr;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoWork() {
+ DCHECK(main_thread_only().task_source);
+ bool task_ran = false;
+
+ {
+ AutoReset<int> do_work_scope(&main_thread_only().do_work_depth,
+ main_thread_only().do_work_depth + 1);
+
+ for (int i = 0; i < main_thread_only().batch_size; i++) {
+ Optional<PendingTask> task = main_thread_only().task_source->TakeTask();
+ if (!task)
+ break;
+
+ TRACE_TASK_EXECUTION("ThreadController::Task", *task);
+ task_annotator_.RunTask("ThreadController::Task", &*task);
+ task_ran = true;
+
+ main_thread_only().task_source->DidRunTask();
+
+ if (main_thread_only().quit_do_work) {
+        // When Quit() is called we must stop running the batch because the
+        // caller expects per-task granularity.
+ main_thread_only().quit_do_work = false;
+ return true;
+ }
+ }
+ } // DoWorkScope.
+
+ LazyNow lazy_now(time_source_);
+ TimeDelta do_work_delay =
+ main_thread_only().task_source->DelayTillNextTask(&lazy_now);
+ DCHECK_GE(do_work_delay, TimeDelta());
+ // Schedule a continuation.
+ if (do_work_delay.is_zero()) {
+ // Need to run new work immediately.
+ pump_->ScheduleWork();
+ } else if (do_work_delay != TimeDelta::Max()) {
+ SetNextDelayedDoWork(&lazy_now, lazy_now.Now() + do_work_delay);
+ } else {
+ SetNextDelayedDoWork(&lazy_now, TimeTicks::Max());
+ }
+
+ return task_ran;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoDelayedWork(
+ TimeTicks* next_run_time) {
+  // Delayed work is processed in DoWork().
+ return false;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
+ // RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
+ if (ShouldQuitWhenIdle())
+ Quit();
+ return false;
+}
+
+void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed) {
+ // No system messages are being processed by this class.
+ DCHECK(application_tasks_allowed);
+
+ // We already have a MessagePump::Run() running, so we're in a nested RunLoop.
+ if (main_thread_only().run_depth > 0 && main_thread_only().nesting_observer)
+ main_thread_only().nesting_observer->OnBeginNestedRunLoop();
+
+ {
+ AutoReset<int> run_scope(&main_thread_only().run_depth,
+ main_thread_only().run_depth + 1);
+    // MessagePump::Run() blocks until Quit() is called, but previously
+    // started Run() calls continue to block.
+ pump_->Run(this);
+ }
+
+ // We'll soon continue to run an outer MessagePump::Run() loop.
+ if (main_thread_only().run_depth > 0 && main_thread_only().nesting_observer)
+ main_thread_only().nesting_observer->OnExitNestedRunLoop();
+}
+
+void ThreadControllerWithMessagePumpImpl::Quit() {
+ // Interrupt a batch of work.
+ if (is_doing_work())
+ main_thread_only().quit_do_work = true;
+ // If we're in a nested RunLoop, continuation will be posted if necessary.
+ pump_->Quit();
+}
+
+void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
+ ScheduleWork();
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
new file mode 100644
index 00000000000..c19a2e8992b
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
@@ -0,0 +1,109 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
+
+#include "base/debug/task_annotator.h"
+#include "base/message_loop/message_pump.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/thread_controller.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// EXPERIMENTAL ThreadController implementation which doesn't use
+// MessageLoop or a task runner to schedule its DoWork calls.
+// See https://crbug.com/828835.
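+//
+// Unlike ThreadControllerImpl, DoWork() here is invoked directly by the
+// MessagePump through MessagePump::Delegate rather than via tasks posted to a
+// task runner.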
+class BASE_EXPORT ThreadControllerWithMessagePumpImpl
+ : public ThreadController,
+ public MessagePump::Delegate,
+ public RunLoop::Delegate {
+ public:
+ explicit ThreadControllerWithMessagePumpImpl(TickClock* time_source);
+ ~ThreadControllerWithMessagePumpImpl() override;
+
+ // ThreadController implementation:
+ void SetSequencedTaskSource(SequencedTaskSource* task_source) override;
+ void SetWorkBatchSize(int work_batch_size) override;
+ void WillQueueTask(PendingTask* pending_task) override;
+ void ScheduleWork() override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ const TickClock* GetClock() override;
+ bool RunsTasksInCurrentSequence() override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) override;
+ void RestoreDefaultTaskRunner() override;
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
+
+ private:
+ friend class DoWorkScope;
+ friend class RunScope;
+
+ // MessagePump::Delegate implementation.
+ bool DoWork() override;
+ bool DoDelayedWork(TimeTicks* next_run_time) override;
+ bool DoIdleWork() override;
+
+ // RunLoop::Delegate implementation.
+ void Run(bool application_tasks_allowed) override;
+ void Quit() override;
+ void EnsureWorkScheduled() override;
+
+ struct MainThreadOnly {
+ MainThreadOnly();
+ ~MainThreadOnly();
+
+ SequencedTaskSource* task_source = nullptr; // Not owned.
+ RunLoop::NestingObserver* nesting_observer = nullptr; // Not owned.
+ std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle;
+
+    // Next delayed DoWork time, used to de-duplicate scheduling requests.
+ TimeTicks next_delayed_work;
+
+ // Indicates that we should yield DoWork ASAP.
+ bool quit_do_work = false;
+
+ // Number of tasks processed in a single DoWork invocation.
+ int batch_size = 1;
+
+ // Number of RunLoop layers currently running.
+ int run_depth = 0;
+
+    // Number of DoWork calls currently running; only the inner-most one may
+    // take tasks. Must be equal to |run_depth| or |run_depth - 1|.
+ int do_work_depth = 0;
+ };
+
+ MainThreadOnly& main_thread_only() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+
+ // Returns true if there's a DoWork running on the inner-most nesting layer.
+ bool is_doing_work() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_.do_work_depth == main_thread_only_.run_depth &&
+ main_thread_only_.do_work_depth != 0;
+ }
+
+ MainThreadOnly main_thread_only_;
+ const PlatformThreadId main_thread_id_;
+ std::unique_ptr<MessagePump> pump_;
+ debug::TaskAnnotator task_annotator_;
+ TickClock* time_source_; // Not owned.
+
+ THREAD_CHECKER(main_thread_checker_);
+ DISALLOW_COPY_AND_ASSIGN(ThreadControllerWithMessagePumpImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
diff --git a/chromium/base/task/sequence_manager/time_domain.cc b/chromium/base/task/sequence_manager/time_domain.cc
new file mode 100644
index 00000000000..8f47eb3a23d
--- /dev/null
+++ b/chromium/base/task/sequence_manager/time_domain.cc
@@ -0,0 +1,136 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/time_domain.h"
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+
+namespace base {
+namespace sequence_manager {
+
+TimeDomain::TimeDomain() : sequence_manager_(nullptr) {}
+
+TimeDomain::~TimeDomain() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+}
+
+void TimeDomain::OnRegisterWithSequenceManager(
+ internal::SequenceManagerImpl* sequence_manager) {
+ DCHECK(sequence_manager);
+ DCHECK(!sequence_manager_);
+ sequence_manager_ = sequence_manager;
+}
+
+SequenceManager* TimeDomain::sequence_manager() const {
+ DCHECK(sequence_manager_);
+ return sequence_manager_;
+}
+
+// TODO(kraynov): https://crbug.com/857101 Consider making an interface
+// for SequenceManagerImpl which will expose SetNextDelayedDoWork and
+// MaybeScheduleImmediateWork methods to make the functions below pure-virtual.
+
+void TimeDomain::SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) {
+ sequence_manager_->SetNextDelayedDoWork(lazy_now, run_time);
+}
+
+void TimeDomain::RequestDoWork() {
+ sequence_manager_->MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+void TimeDomain::UnregisterQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+ LazyNow lazy_now(CreateLazyNow());
+ SetNextWakeUpForQueue(queue, nullopt, &lazy_now);
+}
+
+void TimeDomain::SetNextWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+ DCHECK(queue->IsQueueEnabled() || !wake_up);
+
+ Optional<TimeTicks> previous_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ previous_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
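+  // Each |queue| owns at most one entry in |delayed_wake_up_queue_|; the
+  // queue's heap handle tracks that entry's position, so repeated calls
+  // update the entry in place instead of adding duplicates.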
+ if (wake_up) {
+ // Insert a new wake-up into the heap.
+ if (queue->heap_handle().IsValid()) {
+ // O(log n)
+ delayed_wake_up_queue_.ChangeKey(queue->heap_handle(),
+ {wake_up.value(), queue});
+ } else {
+ // O(log n)
+ delayed_wake_up_queue_.insert({wake_up.value(), queue});
+ }
+ } else {
+ // Remove a wake-up from heap if present.
+ if (queue->heap_handle().IsValid())
+ delayed_wake_up_queue_.erase(queue->heap_handle());
+ }
+
+ Optional<TimeTicks> new_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ new_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
+ // TODO(kraynov): https://crbug.com/857101 Review the relationship with
+ // SequenceManager's time. Right now it's not an issue since
+ // VirtualTimeDomain doesn't invoke SequenceManager itself.
+
+ if (new_wake_up) {
+ if (new_wake_up != previous_wake_up) {
+ // Update the wake-up.
+ SetNextDelayedDoWork(lazy_now, new_wake_up.value());
+ }
+ } else {
+ if (previous_wake_up) {
+ // No new wake-up to be set, cancel the previous one.
+ SetNextDelayedDoWork(lazy_now, TimeTicks::Max());
+ }
+ }
+}
+
+void TimeDomain::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+  // Wake up any queues with pending delayed work. The wake-up heap keeps its
+  // elements ordered by wake-up time, so Min() always refers to the earliest
+  // queue to wake up.
+ while (!delayed_wake_up_queue_.empty() &&
+ delayed_wake_up_queue_.Min().wake_up.time <= lazy_now->Now()) {
+ internal::TaskQueueImpl* queue = delayed_wake_up_queue_.Min().queue;
+ queue->WakeUpForDelayedWork(lazy_now);
+ }
+}
+
+Optional<TimeTicks> TimeDomain::NextScheduledRunTime() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (delayed_wake_up_queue_.empty())
+ return nullopt;
+ return delayed_wake_up_queue_.Min().wake_up.time;
+}
+
+void TimeDomain::AsValueInto(trace_event::TracedValue* state) const {
+ state->BeginDictionary();
+ state->SetString("name", GetName());
+ state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
+ if (!delayed_wake_up_queue_.empty()) {
+ TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
+ state->SetDouble("next_delay_ms", delay.InMillisecondsF());
+ }
+ AsValueIntoInternal(state);
+ state->EndDictionary();
+}
+
+void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const {
+  // Can be overridden to trace some additional state.
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/time_domain.h b/chromium/base/task/sequence_manager/time_domain.h
new file mode 100644
index 00000000000..e9e487bd409
--- /dev/null
+++ b/chromium/base/task/sequence_manager/time_domain.h
@@ -0,0 +1,139 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
+
+#include <map>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+class SequenceManager;
+
+namespace internal {
+class SequenceManagerImpl;
+class TaskQueueImpl;
+} // namespace internal
+
+// TimeDomain wakes up TaskQueues when their delayed tasks are due to run.
+// This class allows overrides to enable clock overriding on some TaskQueues
+// (e.g. auto-advancing virtual time, throttled clock, etc).
+//
+// TaskQueue maintains its own next wake-up time and communicates it
+// to the TimeDomain, which aggregates wake-ups across registered TaskQueues
+// into a global wake-up, which ultimately gets passed to the ThreadController.
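+//
+// For example (illustrative, not part of this header): a virtual time domain
+// can override Now() and CreateLazyNow() to report a manually advanced
+// TimeTicks value, so that delayed tasks become due without real time
+// passing.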
+class BASE_EXPORT TimeDomain {
+ public:
+ virtual ~TimeDomain();
+
+ // Returns LazyNow in TimeDomain's time.
+ // Can be called from any thread.
+ // TODO(alexclarke): Make this main thread only.
+ virtual LazyNow CreateLazyNow() const = 0;
+
+ // Evaluates TimeDomain's time.
+ // Can be called from any thread.
+ // TODO(alexclarke): Make this main thread only.
+ virtual TimeTicks Now() const = 0;
+
+ // Computes the delay until the time when TimeDomain needs to wake up
+ // some TaskQueue. Specific time domains (e.g. virtual or throttled) may
+  // return TimeDelta() if TaskQueues have any delayed tasks they deem
+  // eligible to run. It's also allowed to advance the time domain's internal
+  // clock when this method is called.
+ // Can be called from main thread only.
+ // NOTE: |lazy_now| and the return value are in the SequenceManager's time.
+ virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0;
+
+ void AsValueInto(trace_event::TracedValue* state) const;
+
+ protected:
+ TimeDomain();
+
+ SequenceManager* sequence_manager() const;
+
+ // Returns the earliest scheduled wake up in the TimeDomain's time.
+ Optional<TimeTicks> NextScheduledRunTime() const;
+
+ size_t NumberOfScheduledWakeUps() const {
+ return delayed_wake_up_queue_.size();
+ }
+
+  // Tells SequenceManager to schedule delayed work; use TimeTicks::Max()
+  // to unschedule. Also cancels any previous requests.
+  // May be overridden to control wake-ups manually.
+ virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
+
+ // Tells SequenceManager to schedule immediate work.
+  // May be overridden to control wake-ups manually.
+ virtual void RequestDoWork();
+
+ // For implementation-specific tracing.
+ virtual void AsValueIntoInternal(trace_event::TracedValue* state) const;
+ virtual const char* GetName() const = 0;
+
+ private:
+ friend class internal::TaskQueueImpl;
+ friend class internal::SequenceManagerImpl;
+ friend class TestTimeDomain;
+
+ // Called when the TimeDomain is registered.
+ // TODO(kraynov): Pass SequenceManager in the constructor.
+ void OnRegisterWithSequenceManager(
+ internal::SequenceManagerImpl* sequence_manager);
+
+  // Schedules the TaskQueue to wake up at a certain time; repeated calls
+  // with the same |queue| invalidate previous requests.
+  // A nullopt |wake_up| cancels a previously set wake-up for |queue|.
+ // NOTE: |lazy_now| is provided in TimeDomain's time.
+ void SetNextWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now);
+
+  // Removes the TaskQueue from any internal data structures.
+ void UnregisterQueue(internal::TaskQueueImpl* queue);
+
+ // Wake up each TaskQueue where the delay has elapsed.
+ void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+ struct ScheduledDelayedWakeUp {
+ internal::TaskQueueImpl::DelayedWakeUp wake_up;
+ internal::TaskQueueImpl* queue;
+
+ bool operator<=(const ScheduledDelayedWakeUp& other) const {
+ return wake_up <= other.wake_up;
+ }
+
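+    // SetHeapHandle/ClearHeapHandle are invoked by IntrusiveHeap as this
+    // element is inserted, moved or removed, keeping the owning queue's
+    // handle in sync with its current position.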
+ void SetHeapHandle(internal::HeapHandle handle) {
+ DCHECK(handle.IsValid());
+ queue->set_heap_handle(handle);
+ }
+
+ void ClearHeapHandle() {
+ DCHECK(queue->heap_handle().IsValid());
+ queue->set_heap_handle(internal::HeapHandle());
+ }
+ };
+
+ internal::SequenceManagerImpl* sequence_manager_; // Not owned.
+ internal::IntrusiveHeap<ScheduledDelayedWakeUp> delayed_wake_up_queue_;
+
+ ThreadChecker main_thread_checker_;
+ DISALLOW_COPY_AND_ASSIGN(TimeDomain);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
diff --git a/chromium/base/task/sequence_manager/time_domain_unittest.cc b/chromium/base/task/sequence_manager/time_domain_unittest.cc
new file mode 100644
index 00000000000..951314f5a48
--- /dev/null
+++ b/chromium/base/task/sequence_manager/time_domain_unittest.cc
@@ -0,0 +1,324 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/time_domain.h"
+
+#include <memory>
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::Mock;
+
+namespace base {
+namespace sequence_manager {
+
+class TaskQueueImplForTest : public internal::TaskQueueImpl {
+ public:
+ TaskQueueImplForTest(internal::SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec)
+ : TaskQueueImpl(sequence_manager, time_domain, spec) {}
+ ~TaskQueueImplForTest() {}
+
+ using TaskQueueImpl::SetDelayedWakeUpForTesting;
+};
+
+class TestTimeDomain : public TimeDomain {
+ public:
+ TestTimeDomain() : now_(TimeTicks() + TimeDelta::FromSeconds(1)) {}
+
+ ~TestTimeDomain() override = default;
+
+ using TimeDomain::NextScheduledRunTime;
+ using TimeDomain::SetNextWakeUpForQueue;
+ using TimeDomain::UnregisterQueue;
+ using TimeDomain::WakeUpReadyDelayedQueues;
+
+ LazyNow CreateLazyNow() const override { return LazyNow(now_); }
+ TimeTicks Now() const override { return now_; }
+
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override {
+ return Optional<TimeDelta>();
+ }
+
+ void AsValueIntoInternal(trace_event::TracedValue* state) const override {}
+ const char* GetName() const override { return "Test"; }
+
+ internal::TaskQueueImpl* NextScheduledTaskQueue() const {
+ if (delayed_wake_up_queue_.empty())
+ return nullptr;
+ return delayed_wake_up_queue_.Min().queue;
+ }
+
+ MOCK_METHOD2(SetNextDelayedDoWork,
+ void(LazyNow* lazy_now, TimeTicks run_time));
+
+ void SetNow(TimeTicks now) { now_ = now; }
+
+ private:
+ TimeTicks now_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestTimeDomain);
+};
+
+class TimeDomainTest : public testing::Test {
+ public:
+ void SetUp() final {
+ time_domain_ = WrapUnique(CreateTestTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImplForTest>(
+ nullptr, time_domain_.get(), TaskQueue::Spec("test"));
+ }
+
+ void TearDown() final {
+ if (task_queue_)
+ task_queue_->UnregisterTaskQueue();
+ }
+
+ virtual TestTimeDomain* CreateTestTimeDomain() {
+ return new TestTimeDomain();
+ }
+
+ std::unique_ptr<TestTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImplForTest> task_queue_;
+};
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueue) {
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ TimeTicks delayed_runtime = time_domain_->Now() + delay;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay, 0});
+
+ EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+ EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+ .Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueueSupersedesPreviousWakeUp) {
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(100);
+ TimeTicks delayed_runtime1 = time_domain_->Now() + delay1;
+ TimeTicks delayed_runtime2 = time_domain_->Now() + delay2;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime1));
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime1, 0});
+
+ EXPECT_EQ(delayed_runtime1, time_domain_->NextScheduledRunTime());
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // Now schedule a later wake_up, which should replace the previously
+ // requested one.
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime2));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime2, 0});
+
+ EXPECT_EQ(delayed_runtime2, time_domain_->NextScheduledRunTime());
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+ .Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, SetNextDelayedDoWork_OnlyCalledForEarlierTasks) {
+ std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue3 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue4 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(20);
+ TimeDelta delay3 = TimeDelta::FromMilliseconds(30);
+ TimeDelta delay4 = TimeDelta::FromMilliseconds(1);
+
+ // SetNextDelayedDoWork should always be called if there are no other
+ // wake-ups.
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, now + delay1));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay1, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // SetNextDelayedDoWork should not be called when scheduling later tasks.
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(0);
+ task_queue2->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay2, 0});
+ task_queue3->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay3, 0});
+
+ // SetNextDelayedDoWork should be called when scheduling earlier tasks.
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, now + delay4));
+ task_queue4->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay4, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(2);
+ task_queue2->UnregisterTaskQueue();
+ task_queue3->UnregisterTaskQueue();
+ task_queue4->UnregisterTaskQueue();
+}
+
+TEST_F(TimeDomainTest, UnregisterQueue) {
+ std::unique_ptr<TaskQueueImplForTest> task_queue2_ =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ TimeTicks wake_up1 = now + TimeDelta::FromMilliseconds(10);
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, wake_up1)).Times(1);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{wake_up1, 0});
+ TimeTicks wake_up2 = now + TimeDelta::FromMilliseconds(100);
+ task_queue2_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{wake_up2, 0});
+
+ EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+ testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, wake_up2)).Times(1);
+
+ time_domain_->UnregisterQueue(task_queue_.get());
+ task_queue_ = std::unique_ptr<TaskQueueImplForTest>();
+ EXPECT_EQ(task_queue2_.get(), time_domain_->NextScheduledTaskQueue());
+
+ testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+ .Times(1);
+
+ time_domain_->UnregisterQueue(task_queue2_.get());
+ EXPECT_FALSE(time_domain_->NextScheduledTaskQueue());
+}
+
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueues) {
+ TimeDelta delay = TimeDelta::FromMilliseconds(50);
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now_1(now);
+ TimeTicks delayed_runtime = now + delay;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, 0});
+
+ EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+ time_domain_->WakeUpReadyDelayedQueues(&lazy_now_1);
+ EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+ time_domain_->SetNow(delayed_runtime);
+ LazyNow lazy_now_2(time_domain_->CreateLazyNow());
+ time_domain_->WakeUpReadyDelayedQueues(&lazy_now_2);
+ ASSERT_FALSE(time_domain_->NextScheduledRunTime());
+}
+
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueuesWithIdenticalRuntimes) {
+ int sequence_num = 0;
+ TimeDelta delay = TimeDelta::FromMilliseconds(50);
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ TimeTicks delayed_runtime = now + delay;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ task_queue2->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+
+ time_domain_->WakeUpReadyDelayedQueues(&lazy_now);
+
+ // The second task queue should wake up first since it has a lower sequence
+ // number.
+ EXPECT_EQ(task_queue2.get(), time_domain_->NextScheduledTaskQueue());
+
+ task_queue2->UnregisterTaskQueue();
+}
+
+TEST_F(TimeDomainTest, CancelDelayedWork) {
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ TimeTicks run_time = now + TimeDelta::FromMilliseconds(20);
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{run_time, 0});
+
+ EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+ task_queue_->SetDelayedWakeUpForTesting(nullopt);
+ EXPECT_FALSE(time_domain_->NextScheduledTaskQueue());
+}
+
+TEST_F(TimeDomainTest, CancelDelayedWork_TwoQueues) {
+ std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ TimeTicks run_time1 = now + TimeDelta::FromMilliseconds(20);
+ TimeTicks run_time2 = now + TimeDelta::FromMilliseconds(40);
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time1));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{run_time1, 0});
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(0);
+ task_queue2->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{run_time2, 0});
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+ EXPECT_EQ(run_time1, time_domain_->NextScheduledRunTime());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time2));
+ task_queue_->SetDelayedWakeUpForTesting(nullopt);
+ EXPECT_EQ(task_queue2.get(), time_domain_->NextScheduledTaskQueue());
+
+ EXPECT_EQ(run_time2, time_domain_->NextScheduledRunTime());
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _))
+ .Times(AnyNumber());
+
+ // Tidy up.
+ task_queue2->UnregisterTaskQueue();
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/work_queue.cc b/chromium/base/task/sequence_manager/work_queue.cc
new file mode 100644
index 00000000000..4d95f4b7731
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue.cc
@@ -0,0 +1,236 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue.h"
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
+ const char* name,
+ QueueType queue_type)
+ : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
+
+void WorkQueue::AsValueInto(TimeTicks now,
+ trace_event::TracedValue* state) const {
+ for (const TaskQueueImpl::Task& task : tasks_) {
+ TaskQueueImpl::TaskAsValueInto(task, now, state);
+ }
+}
+
+WorkQueue::~WorkQueue() {
+ DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
+ << work_queue_sets_->GetName() << " : " << name_;
+}
+
+const TaskQueueImpl::Task* WorkQueue::GetFrontTask() const {
+ if (tasks_.empty())
+ return nullptr;
+ return &tasks_.front();
+}
+
+const TaskQueueImpl::Task* WorkQueue::GetBackTask() const {
+ if (tasks_.empty())
+ return nullptr;
+ return &tasks_.back();
+}
+
+bool WorkQueue::BlockedByFence() const {
+ if (!fence_)
+ return false;
+
+ // If the queue is empty then any future tasks will have a higher enqueue
+ // order and will be blocked. The queue is also blocked if the head is past
+ // the fence.
+ return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
+}
+
+bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
+ if (tasks_.empty() || BlockedByFence())
+ return false;
+ // Quick sanity check.
+ DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
+ << task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
+ << name_;
+ *enqueue_order = tasks_.front().enqueue_order();
+ return true;
+}
+
+void WorkQueue::Push(TaskQueueImpl::Task task) {
+ bool was_empty = tasks_.empty();
+#ifndef NDEBUG
+ DCHECK(task.enqueue_order_set());
+#endif
+
+ // Make sure the |enqueue_order()| is monotonically increasing.
+ DCHECK(was_empty || tasks_.rbegin()->enqueue_order() < task.enqueue_order());
+
+  // Amortized O(1).
+ tasks_.push_back(std::move(task));
+
+ if (!was_empty)
+ return;
+
+ // If we hit the fence, pretend to WorkQueueSets that we're empty.
+ if (work_queue_sets_ && !BlockedByFence())
+ work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+void WorkQueue::PushNonNestableTaskToFront(TaskQueueImpl::Task task) {
+ DCHECK(task.nestable == Nestable::kNonNestable);
+
+ bool was_empty = tasks_.empty();
+ bool was_blocked = BlockedByFence();
+#ifndef NDEBUG
+ DCHECK(task.enqueue_order_set());
+#endif
+
+ if (!was_empty) {
+ // Make sure the |enqueue_order()| is monotonically increasing.
+ DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
+ << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
+ << " : " << name_;
+ }
+
+  // Amortized O(1).
+ tasks_.push_front(std::move(task));
+
+ if (!work_queue_sets_)
+ return;
+
+ // Pretend to WorkQueueSets that nothing has changed if we're blocked.
+ if (BlockedByFence())
+ return;
+
+ // Pushing task to front may unblock the fence.
+ if (was_empty || was_blocked) {
+ work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+ } else {
+ work_queue_sets_->OnFrontTaskChanged(this);
+ }
+}
+
+void WorkQueue::ReloadEmptyImmediateQueue() {
+ DCHECK(tasks_.empty());
+
+ task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+ if (tasks_.empty())
+ return;
+
+ // If we hit the fence, pretend to WorkQueueSets that we're empty.
+ if (work_queue_sets_ && !BlockedByFence())
+ work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+TaskQueueImpl::Task WorkQueue::TakeTaskFromWorkQueue() {
+ DCHECK(work_queue_sets_);
+ DCHECK(!tasks_.empty());
+
+ TaskQueueImpl::Task pending_task = std::move(tasks_.front());
+ tasks_.pop_front();
+ // NB immediate tasks have a different pipeline to delayed ones.
+ if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+ // Short-circuit the queue reload so that OnPopQueue does the right thing.
+ task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+ }
+
+ // OnPopQueue calls GetFrontTaskEnqueueOrder which checks BlockedByFence() so
+ // we don't need to here.
+ work_queue_sets_->OnPopQueue(this);
+ task_queue_->TraceQueueSize();
+ return pending_task;
+}
+
+bool WorkQueue::RemoveAllCanceledTasksFromFront() {
+ DCHECK(work_queue_sets_);
+ bool task_removed = false;
+ while (!tasks_.empty() &&
+ (!tasks_.front().task || tasks_.front().task.IsCancelled())) {
+ tasks_.pop_front();
+ task_removed = true;
+ }
+ if (task_removed) {
+ // NB immediate tasks have a different pipeline to delayed ones.
+ if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+ // Short-circuit the queue reload so that OnPopQueue does the right thing.
+ task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+ }
+ work_queue_sets_->OnPopQueue(this);
+ task_queue_->TraceQueueSize();
+ }
+ return task_removed;
+}
+
+void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
+ work_queue_sets_ = work_queue_sets;
+}
+
+void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
+ work_queue_set_index_ = work_queue_set_index;
+}
+
+bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
+ DCHECK_NE(fence, 0u);
+ DCHECK(fence >= fence_ || fence == EnqueueOrder::blocking_fence());
+ bool was_blocked_by_fence = BlockedByFence();
+ fence_ = fence;
+ return was_blocked_by_fence;
+}
+
+void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
+  // Ensure that there is no fence present, or that the new one blocks the
+  // queue completely.
+ DCHECK(!fence_ || fence_ == EnqueueOrder::blocking_fence());
+ InsertFenceImpl(fence);
+}
+
+bool WorkQueue::InsertFence(EnqueueOrder fence) {
+ bool was_blocked_by_fence = InsertFenceImpl(fence);
+
+ // Moving the fence forward may unblock some tasks.
+ if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence &&
+ !BlockedByFence()) {
+ work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+ return true;
+ }
+ // Fence insertion may have blocked all tasks in this work queue.
+ if (BlockedByFence())
+ work_queue_sets_->OnQueueBlocked(this);
+ return false;
+}
+
+bool WorkQueue::RemoveFence() {
+ bool was_blocked_by_fence = BlockedByFence();
+ fence_ = EnqueueOrder::none();
+ if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
+ work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+ return true;
+ }
+ return false;
+}
+
+bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
+ DCHECK(!tasks_.empty());
+ DCHECK(!other_queue->tasks_.empty());
+ EnqueueOrder enqueue_order;
+ EnqueueOrder other_enqueue_order;
+ bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
+ bool have_other_task =
+ other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
+ DCHECK(have_task);
+ DCHECK(have_other_task);
+ return enqueue_order < other_enqueue_order;
+}
+
+void WorkQueue::PopTaskForTesting() {
+ if (tasks_.empty())
+ return;
+ tasks_.pop_front();
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/work_queue.h b/chromium/base/task/sequence_manager/work_queue.h
new file mode 100644
index 00000000000..5197949c503
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue.h
@@ -0,0 +1,152 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
+
+#include "base/base_export.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class WorkQueueSets;
+
+// This class keeps track of immediate and delayed tasks which are due to run
+// now. It interfaces deeply with WorkQueueSets which keeps track of which queue
+// (with a given priority) contains the oldest task.
+//
+// If a fence is inserted, WorkQueue behaves normally up until
+// TakeTaskFromWorkQueue reaches or exceeds the fence. At that point the API
+// subset used by WorkQueueSets pretends the WorkQueue is empty until the
+// fence is removed. This functionality is a primitive intended for use by
+// throttling mechanisms.
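+//
+// Illustrative sketch (behavior exercised by the unit tests, not an API
+// contract): with queued enqueue orders {2, 4, 5} and InsertFence(3),
+// TakeTaskFromWorkQueue() hands out the task with enqueue order 2; the queue
+// then reports itself as empty to WorkQueueSets until the fence is removed or
+// superseded by a later one.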
+class BASE_EXPORT WorkQueue {
+ public:
+ using QueueType = internal::TaskQueueImpl::WorkQueueType;
+
+ // Note |task_queue| can be null if queue_type is kNonNestable.
+ WorkQueue(TaskQueueImpl* task_queue, const char* name, QueueType queue_type);
+ ~WorkQueue();
+
+ // Associates this work queue with the given work queue sets. This must be
+ // called before any tasks can be inserted into this work queue.
+ void AssignToWorkQueueSets(WorkQueueSets* work_queue_sets);
+
+ // Assigns the current set index.
+ void AssignSetIndex(size_t work_queue_set_index);
+
+ void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+
+  // Returns true if |tasks_| is empty. This method ignores any fences.
+ bool Empty() const { return tasks_.empty(); }
+
+  // If |tasks_| isn't empty and a fence hasn't been reached, |enqueue_order|
+  // gets set to the enqueue order of the front task and the function returns
+  // true. Otherwise the function returns false.
+ bool GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const;
+
+ // Returns the first task in this queue or null if the queue is empty. This
+ // method ignores any fences.
+ const TaskQueueImpl::Task* GetFrontTask() const;
+
+ // Returns the last task in this queue or null if the queue is empty. This
+ // method ignores any fences.
+ const TaskQueueImpl::Task* GetBackTask() const;
+
+  // Pushes the task onto |tasks_| and, if a fence hasn't been reached,
+  // informs the WorkQueueSets if the head changed.
+ void Push(TaskQueueImpl::Task task);
+
+  // Pushes the task onto the front of |tasks_| and, if it's before any fence,
+  // informs the WorkQueueSets that the head changed. Use with caution: this
+  // API can easily lead to task starvation if misused.
+ void PushNonNestableTaskToFront(TaskQueueImpl::Task task);
+
+  // Reloads the empty |tasks_| with tasks from |task_queue_|'s immediate
+  // incoming queue and, if a fence hasn't been reached, informs the
+  // WorkQueueSets if the head changed.
+ void ReloadEmptyImmediateQueue();
+
+ size_t Size() const { return tasks_.size(); }
+
+  // Pulls a task off the front of |tasks_| and informs the WorkQueueSets. If
+  // the removed task had an enqueue order >= the current fence then the
+  // WorkQueue pretends to be empty as far as the WorkQueueSets is concerned.
+ TaskQueueImpl::Task TakeTaskFromWorkQueue();
+
+ // Removes all canceled tasks from the head of the list. Returns true if any
+ // tasks were removed.
+ bool RemoveAllCanceledTasksFromFront();
+
+ const char* name() const { return name_; }
+
+ TaskQueueImpl* task_queue() const { return task_queue_; }
+
+ WorkQueueSets* work_queue_sets() const { return work_queue_sets_; }
+
+ size_t work_queue_set_index() const { return work_queue_set_index_; }
+
+ HeapHandle heap_handle() const { return heap_handle_; }
+
+ void set_heap_handle(HeapHandle handle) { heap_handle_ = handle; }
+
+ QueueType queue_type() const { return queue_type_; }
+
+ // Returns true if the front task in this queue has an older enqueue order
+  // than the front task of |other_queue|. Both queues are assumed to be
+ // non-empty. This method ignores any fences.
+ bool ShouldRunBefore(const WorkQueue* other_queue) const;
+
+ // Submit a fence. When TakeTaskFromWorkQueue encounters a task whose
+  // enqueue_order is >= |fence|, the WorkQueue will start pretending to be
+  // empty.
+  // Inserting a fence may supersede a previous one and unblock some tasks.
+  // Returns true if any tasks were unblocked, returns false otherwise.
+ bool InsertFence(EnqueueOrder fence);
+
+ // Submit a fence without triggering a WorkQueueSets notification.
+ // Caller must ensure that WorkQueueSets are properly updated.
+ // This method should not be called when a fence is already present.
+ void InsertFenceSilently(EnqueueOrder fence);
+
+  // Removes any fence that was added and, if the WorkQueue was pretending to
+  // be empty, the real value is reported to WorkQueueSets. Returns true if
+  // any tasks were unblocked.
+ bool RemoveFence();
+
+  // Returns true if any tasks are blocked by the fence. Also returns true if
+  // the queue is empty and a fence has been set (i.e. future tasks would be
+  // blocked). Otherwise returns false.
+ bool BlockedByFence() const;
+
+ // Test support function. This should not be used in production code.
+ void PopTaskForTesting();
+
+ private:
+ bool InsertFenceImpl(EnqueueOrder fence);
+
+ TaskQueueImpl::TaskDeque tasks_;
+ WorkQueueSets* work_queue_sets_ = nullptr; // NOT OWNED.
+ TaskQueueImpl* const task_queue_; // NOT OWNED.
+ size_t work_queue_set_index_ = 0;
+ HeapHandle heap_handle_;
+ const char* const name_;
+ EnqueueOrder fence_;
+ const QueueType queue_type_;
+
+ DISALLOW_COPY_AND_ASSIGN(WorkQueue);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
diff --git a/chromium/base/task/sequence_manager/work_queue_sets.cc b/chromium/base/task/sequence_manager/work_queue_sets.cc
new file mode 100644
index 00000000000..e56fc82e0b3
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue_sets.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+WorkQueueSets::WorkQueueSets(size_t num_sets, const char* name)
+ : work_queue_heaps_(num_sets), name_(name) {}
+
+WorkQueueSets::~WorkQueueSets() = default;
+
+void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
+ DCHECK(!work_queue->work_queue_sets());
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ EnqueueOrder enqueue_order;
+ bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+ work_queue->AssignToWorkQueueSets(this);
+ work_queue->AssignSetIndex(set_index);
+ if (!has_enqueue_order)
+ return;
+ work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ work_queue->AssignToWorkQueueSets(nullptr);
+ HeapHandle heap_handle = work_queue->heap_handle();
+ if (!heap_handle.IsValid())
+ return;
+ size_t set_index = work_queue->work_queue_set_index();
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ EnqueueOrder enqueue_order;
+ bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+ size_t old_set = work_queue->work_queue_set_index();
+ DCHECK_LT(old_set, work_queue_heaps_.size());
+ DCHECK_NE(old_set, set_index);
+ work_queue->AssignSetIndex(set_index);
+ if (!has_enqueue_order)
+ return;
+ work_queue_heaps_[old_set].erase(work_queue->heap_handle());
+ work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+void WorkQueueSets::OnFrontTaskChanged(WorkQueue* work_queue) {
+ EnqueueOrder enqueue_order;
+ bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+ DCHECK(has_enqueue_order);
+ size_t set = work_queue->work_queue_set_index();
+ work_queue_heaps_[set].ChangeKey(work_queue->heap_handle(),
+ {enqueue_order, work_queue});
+}
+
+void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
+ // NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
+ // sync.
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ EnqueueOrder enqueue_order;
+ bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+ DCHECK(has_enqueue_order);
+ size_t set_index = work_queue->work_queue_set_index();
+ DCHECK_LT(set_index, work_queue_heaps_.size())
+ << " set_index = " << set_index;
+ // |work_queue| should not be in work_queue_heaps_[set_index].
+ DCHECK(!work_queue->heap_handle().IsValid());
+ work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+void WorkQueueSets::OnPopQueue(WorkQueue* work_queue) {
+ // Assume that |work_queue| contains the lowest enqueue_order.
+ size_t set_index = work_queue->work_queue_set_index();
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
+ DCHECK_EQ(work_queue_heaps_[set_index].Min().value, work_queue)
+ << " set_index = " << set_index;
+ DCHECK(work_queue->heap_handle().IsValid());
+ EnqueueOrder enqueue_order;
+ if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
+ // O(log n)
+ work_queue_heaps_[set_index].ReplaceMin({enqueue_order, work_queue});
+ } else {
+ // O(log n)
+ work_queue_heaps_[set_index].Pop();
+ DCHECK(work_queue_heaps_[set_index].empty() ||
+ work_queue_heaps_[set_index].Min().value != work_queue);
+ }
+}
+
+void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ HeapHandle heap_handle = work_queue->heap_handle();
+ if (!heap_handle.IsValid())
+ return;
+ size_t set_index = work_queue->work_queue_set_index();
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+bool WorkQueueSets::GetOldestQueueInSet(size_t set_index,
+ WorkQueue** out_work_queue) const {
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ if (work_queue_heaps_[set_index].empty())
+ return false;
+ *out_work_queue = work_queue_heaps_[set_index].Min().value;
+ DCHECK_EQ(set_index, (*out_work_queue)->work_queue_set_index());
+ DCHECK((*out_work_queue)->heap_handle().IsValid());
+ return true;
+}
+
+bool WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
+ size_t set_index,
+ WorkQueue** out_work_queue,
+ EnqueueOrder* out_enqueue_order) const {
+ DCHECK_LT(set_index, work_queue_heaps_.size());
+ if (work_queue_heaps_[set_index].empty())
+ return false;
+ const OldestTaskEnqueueOrder& oldest = work_queue_heaps_[set_index].Min();
+ *out_work_queue = oldest.value;
+ *out_enqueue_order = oldest.key;
+ EnqueueOrder enqueue_order;
+ DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
+ oldest.key == enqueue_order);
+ return true;
+}
+
+bool WorkQueueSets::IsSetEmpty(size_t set_index) const {
+ DCHECK_LT(set_index, work_queue_heaps_.size())
+ << " set_index = " << set_index;
+ return work_queue_heaps_[set_index].empty();
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+bool WorkQueueSets::ContainsWorkQueueForTest(
+ const WorkQueue* work_queue) const {
+ EnqueueOrder enqueue_order;
+ bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+
+ for (const IntrusiveHeap<OldestTaskEnqueueOrder>& heap : work_queue_heaps_) {
+ for (const OldestTaskEnqueueOrder& heap_value_pair : heap) {
+ if (heap_value_pair.value == work_queue) {
+ DCHECK(has_enqueue_order);
+ DCHECK_EQ(heap_value_pair.key, enqueue_order);
+ DCHECK_EQ(this, work_queue->work_queue_sets());
+ return true;
+ }
+ }
+ }
+
+ if (work_queue->work_queue_sets() == this) {
+ DCHECK(!has_enqueue_order);
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/work_queue_sets.h b/chromium/base/task/sequence_manager/work_queue_sets.h
new file mode 100644
index 00000000000..01db04084cf
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue_sets.h
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
+#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
+
+#include <map>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// There is a WorkQueueSet for each scheduler priority and each WorkQueueSet
+// uses an IntrusiveHeap, keyed by enqueue order, to keep track of which queue
+// in the set has the oldest task (i.e. the one that should be run next if the
+// TaskQueueSelector chooses to run a task at a given priority). This works
+// because the heap keeps the queue with the smallest front enqueue order at
+// its root.
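+//
+// For example (as exercised by the unit tests): if queues A, B and C in one
+// set have front tasks with enqueue orders 6, 5 and 4 respectively,
+// GetOldestQueueInSet() returns C; after C's task is popped and OnPopQueue()
+// is called, it returns B.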
+class BASE_EXPORT WorkQueueSets {
+ public:
+ WorkQueueSets(size_t num_sets, const char* name);
+ ~WorkQueueSets();
+
+ // O(log num queues)
+ void AddQueue(WorkQueue* queue, size_t set_index);
+
+ // O(log num queues)
+ void RemoveQueue(WorkQueue* work_queue);
+
+ // O(log num queues)
+ void ChangeSetIndex(WorkQueue* queue, size_t set_index);
+
+ // O(log num queues)
+ void OnFrontTaskChanged(WorkQueue* queue);
+
+ // O(log num queues)
+ void OnTaskPushedToEmptyQueue(WorkQueue* work_queue);
+
+ // If empty it's O(1) amortized, otherwise it's O(log num queues)
+ // Assumes |work_queue| contains the lowest enqueue order in the set.
+ void OnPopQueue(WorkQueue* work_queue);
+
+ // O(log num queues)
+ void OnQueueBlocked(WorkQueue* work_queue);
+
+ // O(1)
+ bool GetOldestQueueInSet(size_t set_index, WorkQueue** out_work_queue) const;
+
+ // O(1)
+ bool GetOldestQueueAndEnqueueOrderInSet(
+ size_t set_index,
+ WorkQueue** out_work_queue,
+ EnqueueOrder* out_enqueue_order) const;
+
+ // O(1)
+ bool IsSetEmpty(size_t set_index) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+ // Note this iterates over everything in |work_queue_heaps_|.
+  // It's intended for use with DCHECKs and for testing.
+ bool ContainsWorkQueueForTest(const WorkQueue* queue) const;
+#endif
+
+ const char* GetName() const { return name_; }
+
+ private:
+ struct OldestTaskEnqueueOrder {
+ EnqueueOrder key;
+ WorkQueue* value;
+
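+    // IntrusiveHeap is a min-heap under this comparison, so Min() yields the
+    // entry whose front task has the smallest (oldest) enqueue order.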
+ bool operator<=(const OldestTaskEnqueueOrder& other) const {
+ return key <= other.key;
+ }
+
+ void SetHeapHandle(HeapHandle handle) { value->set_heap_handle(handle); }
+
+ void ClearHeapHandle() { value->set_heap_handle(HeapHandle()); }
+ };
+
+  // For each set, |work_queue_heaps_| holds a min-heap of WorkQueues ordered
+  // by the enqueue order of the oldest task in each WorkQueue.
+ std::vector<IntrusiveHeap<OldestTaskEnqueueOrder>> work_queue_heaps_;
+ const char* const name_;
+
+ DISALLOW_COPY_AND_ASSIGN(WorkQueueSets);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
diff --git a/chromium/base/task/sequence_manager/work_queue_sets_unittest.cc b/chromium/base/task/sequence_manager/work_queue_sets_unittest.cc
new file mode 100644
index 00000000000..b849eec0797
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue_sets_unittest.cc
@@ -0,0 +1,328 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+#include <stddef.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TimeDomain;
+
+namespace internal {
+
+class WorkQueueSetsTest : public testing::Test {
+ public:
+ void SetUp() override {
+ work_queue_sets_.reset(new WorkQueueSets(kNumSets, "test"));
+ }
+
+ void TearDown() override {
+ for (std::unique_ptr<WorkQueue>& work_queue : work_queues_) {
+ if (work_queue->work_queue_sets())
+ work_queue_sets_->RemoveQueue(work_queue.get());
+ }
+ }
+
+ protected:
+ enum {
+    kNumSets = 5  // An arbitrary choice.
+ };
+
+ WorkQueue* NewTaskQueue(const char* queue_name) {
+ WorkQueue* queue =
+ new WorkQueue(nullptr, "test", WorkQueue::QueueType::kImmediate);
+ work_queues_.push_back(WrapUnique(queue));
+ work_queue_sets_->AddQueue(queue, TaskQueue::kControlPriority);
+ return queue;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce([] {}), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce([] {}), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ fake_task.nestable = Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::vector<std::unique_ptr<WorkQueue>> work_queues_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+};
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ EXPECT_EQ(set, work_queue->work_queue_set_index());
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_QueueEmpty) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+}
+
+TEST_F(WorkQueueSetsTest, OnTaskPushedToEmptyQueue) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+
+ // Calls OnTaskPushedToEmptyQueue.
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_SingleTaskInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueAndEnqueueOrderInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueAndEnqueueOrderInSet(
+ set, &selected_work_queue, &enqueue_order));
+ EXPECT_EQ(work_queue, selected_work_queue);
+ EXPECT_EQ(10u, enqueue_order);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+  WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(1));
+ queue2->Push(FakeTaskWithEnqueueOrder(3));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 3;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ queue2->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue2);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue_QueueBecomesEmpty) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue3->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue3);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest,
+ GetOldestQueueInSet_MultipleAgesInSetIntegerRollover) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(0x7ffffff1));
+ queue2->Push(FakeTaskWithEnqueueOrder(0x7ffffff0));
+ queue3->Push(FakeTaskWithEnqueueOrder(-0x7ffffff1));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet_RemoveQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+ work_queue_sets_->RemoveQueue(queue3);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex_Complex) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ WorkQueue* queue4 = NewTaskQueue("queue4");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ queue4->Push(FakeTaskWithEnqueueOrder(3));
+ size_t set1 = 1;
+ size_t set2 = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set1);
+ work_queue_sets_->ChangeSetIndex(queue2, set1);
+ work_queue_sets_->ChangeSetIndex(queue3, set2);
+ work_queue_sets_->ChangeSetIndex(queue4, set2);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ work_queue_sets_->ChangeSetIndex(queue4, set1);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_NoWork) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_Work) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_FALSE(work_queue_sets_->IsSetEmpty(set));
+
+ work_queue->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(work_queue);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, BlockQueuesByFence) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(7));
+ queue1->Push(FakeTaskWithEnqueueOrder(8));
+ queue2->Push(FakeTaskWithEnqueueOrder(9));
+
+ size_t set = TaskQueue::kControlPriority;
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue1);
+
+ queue1->InsertFence(EnqueueOrder::blocking_fence());
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue2);
+}
+
+TEST_F(WorkQueueSetsTest, PushNonNestableTaskToFront) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue1->PushNonNestableTaskToFront(FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue1, selected_work_queue);
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/work_queue_unittest.cc b/chromium/base/task/sequence_manager/work_queue_unittest.cc
new file mode 100644
index 00000000000..a71cebcabcb
--- /dev/null
+++ b/chromium/base/task/sequence_manager/work_queue_unittest.cc
@@ -0,0 +1,475 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+void NopTask() {}
+
+struct Cancelable {
+ Cancelable() : weak_ptr_factory(this) {}
+
+ void NopTask() {}
+
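+  // Destroying this factory invalidates its outstanding weak pointers, which
+  // marks tasks bound to them as canceled.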
+ WeakPtrFactory<Cancelable> weak_ptr_factory;
+};
+
+} // namespace
+
+class WorkQueueTest : public testing::Test {
+ public:
+ void SetUp() override {
+ time_domain_.reset(new RealTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ work_queue_.reset(new WorkQueue(task_queue_.get(), "test",
+ WorkQueue::QueueType::kImmediate));
+ work_queue_sets_.reset(new WorkQueueSets(1, "test"));
+ work_queue_sets_->AddQueue(work_queue_.get(), 0);
+ }
+
+ void TearDown() override { work_queue_sets_->RemoveQueue(work_queue_.get()); }
+
+ protected:
+ TaskQueueImpl::Task FakeCancelableTaskWithEnqueueOrder(
+ int enqueue_order,
+ WeakPtr<Cancelable> weak_ptr) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&Cancelable::NopTask, weak_ptr),
+ FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&NopTask), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&NopTask), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ fake_task.nestable = Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::unique_ptr<RealTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImpl> task_queue_;
+ std::unique_ptr<WorkQueue> work_queue_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+ std::unique_ptr<TaskQueueImpl::TaskDeque> incoming_queue_;
+};
+
+TEST_F(WorkQueueTest, Empty) {
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, Empty_IgnoresFences) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrderQueueEmpty) {
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrder) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskQueueEmpty) {
+ EXPECT_EQ(nullptr, work_queue_->GetFrontTask());
+}
+
+TEST_F(WorkQueueTest, GetFrontTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, GetBackTask_Empty) {
+ EXPECT_EQ(nullptr, work_queue_->GetBackTask());
+}
+
+TEST_F(WorkQueueTest, GetBackTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, Push) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+}
+
+TEST_F(WorkQueueTest, PushAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFront) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(3));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontBeforeFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueue) {
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueueAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue_HitFence) {
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceBeforeEnqueueing) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueingNonBlocking) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(5)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ // NB in reality a fence will always be greater than any currently enqueued
+ // tasks.
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertNewFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ // Note until TakeTaskFromWorkQueue() is called we don't hit the fence.
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ // Inserting the new fence should temporarily unblock the queue until the new
+ // one is hit.
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(6)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(4ull, enqueue_order);
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, PushWithNonEmptyQueueDoesNotHitFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->RemoveFence());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceButNoFence) {
+ EXPECT_FALSE(work_queue_->RemoveFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceNothingUnblocked) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->RemoveFence());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFence) {
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePopBecomesEmpty) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePop) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InitiallyEmptyBlockedByFenceNewFenceUnblocks) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFenceNewFenceUnblocks) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(4)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueuing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFront) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ }
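+  // |cancelable| has gone out of scope, invalidating its weak pointers and
+  // thereby canceling the first three tasks pushed above.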
+ EXPECT_TRUE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(5ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFrontTasksNotCanceled) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ EXPECT_FALSE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+ }
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task_runner.h b/chromium/base/task_runner.h
index e4c6b41db61..1d302ab0fa8 100644
--- a/chromium/base/task_runner.h
+++ b/chromium/base/task_runner.h
@@ -136,10 +136,6 @@ class BASE_EXPORT TaskRunner
protected:
friend struct TaskRunnerTraits;
- // Only the Windows debug build seems to need this: see
- // http://crbug.com/112250.
- friend class RefCountedThreadSafe<TaskRunner, TaskRunnerTraits>;
-
TaskRunner();
virtual ~TaskRunner();
diff --git a/chromium/base/task_scheduler/environment_config.cc b/chromium/base/task_scheduler/environment_config.cc
index 393b5916168..3c76f2fc983 100644
--- a/chromium/base/task_scheduler/environment_config.cc
+++ b/chromium/base/task_scheduler/environment_config.cc
@@ -4,6 +4,10 @@
#include "base/task_scheduler/environment_config.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
namespace base {
namespace internal {
@@ -15,5 +19,27 @@ size_t GetEnvironmentIndexForTraits(const TaskTraits& traits) {
return is_background ? BACKGROUND : FOREGROUND;
}
+bool CanUseBackgroundPriorityForSchedulerWorker() {
+ // When Lock doesn't handle multiple thread priorities, run all
+  // SchedulerWorkers with a normal priority to avoid priority inversion when a
+ // thread running with a normal priority tries to acquire a lock held by a
+ // thread running with a background priority.
+ if (!Lock::HandlesMultipleThreadPriorities())
+ return false;
+
+#if !defined(OS_ANDROID)
+ // When thread priority can't be increased, run all threads with a normal
+  // priority to avoid priority inversions on shutdown (TaskScheduler raises
+  // the priority of background threads to normal on shutdown while resolving
+  // remaining shutdown-blocking tasks).
+ //
+ // This is ignored on Android, because it doesn't have a clean shutdown phase.
+ if (!PlatformThread::CanIncreaseCurrentThreadPriority())
+ return false;
+#endif  // !defined(OS_ANDROID)
+
+ return true;
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/environment_config.h b/chromium/base/task_scheduler/environment_config.h
index 54f2ff34da1..19b685aeb34 100644
--- a/chromium/base/task_scheduler/environment_config.h
+++ b/chromium/base/task_scheduler/environment_config.h
@@ -15,10 +15,13 @@ namespace base {
namespace internal {
enum EnvironmentType {
- BACKGROUND = 0,
- BACKGROUND_BLOCKING,
- FOREGROUND,
+ FOREGROUND = 0,
FOREGROUND_BLOCKING,
+  // Pools will only be created for the environments above on platforms that
+ // don't support SchedulerWorkers running with a background priority.
+ ENVIRONMENT_COUNT_WITHOUT_BACKGROUND_PRIORITY,
+ BACKGROUND = ENVIRONMENT_COUNT_WITHOUT_BACKGROUND_PRIORITY,
+ BACKGROUND_BLOCKING,
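+  // Note: this ordering is assumed to match kEnvironmentParams below, which
+  // is indexed by the values of this enum.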
ENVIRONMENT_COUNT // Always last.
};
@@ -32,14 +35,18 @@ constexpr struct {
// priority depends on shutdown state and platform capabilities.
ThreadPriority priority_hint;
} kEnvironmentParams[] = {
- {"Background", base::ThreadPriority::BACKGROUND},
- {"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
{"Foreground", base::ThreadPriority::NORMAL},
{"ForegroundBlocking", base::ThreadPriority::NORMAL},
+ {"Background", base::ThreadPriority::BACKGROUND},
+ {"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
};
size_t BASE_EXPORT GetEnvironmentIndexForTraits(const TaskTraits& traits);
+// Returns true if this platform supports having SchedulerWorkers running with a
+// background priority.
+bool BASE_EXPORT CanUseBackgroundPriorityForSchedulerWorker();
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/priority_queue_unittest.cc b/chromium/base/task_scheduler/priority_queue_unittest.cc
index 9dc4d1359f4..f131c55fe74 100644
--- a/chromium/base/task_scheduler/priority_queue_unittest.cc
+++ b/chromium/base/task_scheduler/priority_queue_unittest.cc
@@ -30,9 +30,7 @@ class ThreadBeginningTransaction : public SimpleThread {
public:
explicit ThreadBeginningTransaction(PriorityQueue* priority_queue)
: SimpleThread("ThreadBeginningTransaction"),
- priority_queue_(priority_queue),
- transaction_began_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ priority_queue_(priority_queue) {}
// SimpleThread:
void Run() override {
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
index 5928f41307a..737df42b1c3 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -249,7 +249,7 @@ class SchedulerWorkerCOMDelegate : public SchedulerWorkerDelegate {
},
std::move(msg)),
TaskTraits(MayBlock()), TimeDelta());
- if (task_tracker_->WillPostTask(pump_message_task)) {
+ if (task_tracker_->WillPostTask(&pump_message_task)) {
bool was_empty =
message_pump_sequence_->PushTask(std::move(pump_message_task));
DCHECK(was_empty) << "GetWorkFromWindowsMessageQueue() does not expect "
@@ -300,7 +300,7 @@ class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
Task task(from_here, std::move(closure), traits_, delay);
task.single_thread_task_runner_ref = this;
- if (!outer_->task_tracker_->WillPostTask(task))
+ if (!outer_->task_tracker_->WillPostTask(&task))
return false;
if (task.delayed_run_time.is_null()) {
@@ -422,12 +422,13 @@ void SchedulerSingleThreadTaskRunnerManager::Start(
workers_to_start = workers_;
}
- // Start workers that were created before this method was called. Other
- // workers are started as they are created.
- for (scoped_refptr<SchedulerWorker> worker : workers_to_start) {
+ // Start workers that were created before this method was called.
+ // Workers that need to wake up have already been signaled as part of
+ // SchedulerSingleThreadTaskRunner::PostTaskNow(). As a result, it's
+ // unnecessary to call WakeUp() for each worker (in fact, an extraneous
+ // WakeUp() would be racy and wrong - see https://crbug.com/862582).
+ for (scoped_refptr<SchedulerWorker> worker : workers_to_start)
worker->Start(scheduler_worker_observer_);
- worker->WakeUp();
- }
}
scoped_refptr<SingleThreadTaskRunner>
@@ -491,7 +492,10 @@ SchedulerSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
worker_name += "Shared";
worker_name += environment_params.name_suffix;
worker = CreateAndRegisterSchedulerWorker<DelegateType>(
- worker_name, thread_mode, environment_params.priority_hint);
+ worker_name, thread_mode,
+ CanUseBackgroundPriorityForSchedulerWorker()
+ ? environment_params.priority_hint
+ : ThreadPriority::NORMAL);
new_worker = true;
}
started = started_;
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
index 52d99f6e838..8eb02f3a9d2 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -11,6 +11,7 @@
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/post_task.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_tracker.h"
@@ -198,10 +199,8 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
// Regression test for https://crbug.com/829786
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
ContinueOnShutdownDoesNotBlockBlockShutdown) {
- WaitableEvent task_has_started(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent task_can_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_has_started;
+ WaitableEvent task_can_continue;
// Post a CONTINUE_ON_SHUTDOWN task that waits on
// |task_can_continue| to a shared SingleThreadTaskRunner.
@@ -272,9 +271,7 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
ThreadPriority thread_priority_background;
task_runner_background->PostTask(
FROM_HERE, BindOnce(&CaptureThreadPriority, &thread_priority_background));
- WaitableEvent waitable_event_background(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent waitable_event_background;
task_runner_background->PostTask(
FROM_HERE,
BindOnce(&WaitableEvent::Signal, Unretained(&waitable_event_background)));
@@ -282,9 +279,7 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
ThreadPriority thread_priority_normal;
task_runner_normal->PostTask(
FROM_HERE, BindOnce(&CaptureThreadPriority, &thread_priority_normal));
- WaitableEvent waitable_event_normal(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent waitable_event_normal;
task_runner_normal->PostTask(
FROM_HERE,
BindOnce(&WaitableEvent::Signal, Unretained(&waitable_event_normal)));
@@ -292,12 +287,10 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
waitable_event_background.Wait();
waitable_event_normal.Wait();
- if (Lock::HandlesMultipleThreadPriorities() &&
- PlatformThread::CanIncreaseCurrentThreadPriority()) {
+ if (CanUseBackgroundPriorityForSchedulerWorker())
EXPECT_EQ(ThreadPriority::BACKGROUND, thread_priority_background);
- } else {
+ else
EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_background);
- }
EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_normal);
}
@@ -412,9 +405,7 @@ class CallJoinFromDifferentThread : public SimpleThread {
CallJoinFromDifferentThread(
SchedulerSingleThreadTaskRunnerManager* manager_to_join)
: SimpleThread("SchedulerSingleThreadTaskRunnerManagerJoinThread"),
- manager_to_join_(manager_to_join),
- run_started_event_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ manager_to_join_(manager_to_join) {}
~CallJoinFromDifferentThread() override = default;
@@ -453,10 +444,8 @@ class TaskSchedulerSingleThreadTaskRunnerManagerJoinTest
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest, ConcurrentJoin) {
// Exercises the codepath where the workers are unavailable for unregistration
// because of a Join call.
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
+ WaitableEvent task_blocking;
{
auto task_runner = single_thread_task_runner_manager_
@@ -483,10 +472,8 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest,
ConcurrentJoinExtraSkippedTask) {
// Tests to make sure that tasks are properly cleaned up at Join, allowing
// SingleThreadTaskRunners to unregister themselves.
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
+ WaitableEvent task_blocking;
{
auto task_runner = single_thread_task_runner_manager_
@@ -645,8 +632,7 @@ class TaskSchedulerSingleThreadTaskRunnerManagerStartTest
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerStartTest,
PostTaskBeforeStart) {
AtomicFlag manager_started;
- WaitableEvent task_finished(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_finished;
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
diff --git a/chromium/base/task_scheduler/scheduler_worker.cc b/chromium/base/task_scheduler/scheduler_worker.cc
index ce57267c295..152b534cbb1 100644
--- a/chromium/base/task_scheduler/scheduler_worker.cc
+++ b/chromium/base/task_scheduler/scheduler_worker.cc
@@ -11,6 +11,7 @@
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/logging.h"
+#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/scheduler_worker_observer.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/trace_event/trace_event.h"
@@ -55,6 +56,8 @@ SchedulerWorker::SchedulerWorker(
{
DCHECK(delegate_);
DCHECK(task_tracker_);
+ DCHECK(CanUseBackgroundPriorityForSchedulerWorker() ||
+ priority_hint_ != ThreadPriority::BACKGROUND);
}
bool SchedulerWorker::Start(
@@ -156,21 +159,10 @@ bool SchedulerWorker::ShouldExit() const {
}
ThreadPriority SchedulerWorker::GetDesiredThreadPriority() const {
- // All threads have a NORMAL priority when Lock doesn't handle multiple thread
- // priorities.
- if (!Lock::HandlesMultipleThreadPriorities())
+ // To avoid shutdown hangs, disallow a priority below NORMAL during shutdown.
+ if (task_tracker_->HasShutdownStarted())
return ThreadPriority::NORMAL;
- // To avoid shutdown hangs, disallow a priority below NORMAL during shutdown.
- // If thread priority cannot be increased, never allow a priority below
- // NORMAL.
- if (static_cast<int>(priority_hint_) <
- static_cast<int>(ThreadPriority::NORMAL) &&
- (task_tracker_->HasShutdownStarted() ||
- !PlatformThread::CanIncreaseCurrentThreadPriority())) {
- return ThreadPriority::NORMAL;
- }
-
return priority_hint_;
}
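
The simplified GetDesiredThreadPriority() can drop the CanIncreaseCurrentThreadPriority() check because the constructor DCHECK added above guarantees that |priority_hint_| is never BACKGROUND when CanUseBackgroundPriorityForSchedulerWorker() is false. A condensed restatement of the resulting logic (a sketch, not the literal source):

  // Only the shutdown state needs to be consulted; the constructor already
  // rejected a BACKGROUND hint on platforms that cannot honor it.
  ThreadPriority desired = task_tracker_->HasShutdownStarted()
                               ? ThreadPriority::NORMAL
                               : priority_hint_;
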
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.cc b/chromium/base/task_scheduler/scheduler_worker_pool.cc
index 1a5c35da94f..4bb5ca70ed8 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.cc
@@ -145,7 +145,7 @@ bool SchedulerWorkerPool::PostTaskWithSequence(
DCHECK(task.task);
DCHECK(sequence);
- if (!task_tracker_->WillPostTask(task))
+ if (!task_tracker_->WillPostTask(&task))
return false;
if (task.delayed_run_time.is_null()) {
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index 6eb4262cf9c..81dc5df4e00 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -90,9 +90,14 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
void WillBlockEntered();
// Returns true iff this worker has been within a MAY_BLOCK ScopedBlockingCall
- // for more than |outer_->MayBlockThreshold()|. The worker capacity must be
+ // for more than |outer_->MayBlockThreshold()|. The max tasks must be
// incremented if this returns true.
- bool MustIncrementWorkerCapacityLockRequired();
+ bool MustIncrementMaxTasksLockRequired();
+
+ bool is_running_background_task_lock_required() const {
+ outer_->lock_.AssertAcquired();
+ return is_running_background_task_;
+ }
private:
// Returns true if |worker| is allowed to cleanup and remove itself from the
@@ -120,9 +125,9 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// TaskScheduler.NumTasksBeforeDetach histogram was recorded.
size_t num_tasks_since_last_detach_ = 0;
- // Whether |outer_->worker_capacity_| was incremented due to a
- // ScopedBlockingCall on the thread. Access synchronized by |outer_->lock_|.
- bool incremented_worker_capacity_since_blocked_ = false;
+ // Whether |outer_->max_tasks_| was incremented due to a ScopedBlockingCall on
+ // the thread. Access synchronized by |outer_->lock_|.
+ bool incremented_max_tasks_since_blocked_ = false;
// Time when MayBlockScopeEntered() was last called. Reset when
// BlockingScopeExited() is called. Access synchronized by |outer_->lock_|.
@@ -132,6 +137,12 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// returned a non-empty sequence and DidRunTask() hasn't been called yet).
bool is_running_task_ = false;
+ // Whether this worker is currently running a TaskPriority::BACKGROUND task.
+ // Writes are made from the worker thread and are protected by
+ // |outer_->lock_|. Reads are made from any thread; they are protected by
+ // |outer_->lock_| when made outside of the worker thread.
+ bool is_running_background_task_ = false;
+
#if defined(OS_WIN)
std::unique_ptr<win::ScopedWindowsThreadEnvironment> win_thread_environment_;
#endif // defined(OS_WIN)
@@ -192,6 +203,7 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
void SchedulerWorkerPoolImpl::Start(
const SchedulerWorkerPoolParams& params,
+ int max_background_tasks,
scoped_refptr<TaskRunner> service_thread_task_runner,
SchedulerWorkerObserver* scheduler_worker_observer,
WorkerEnvironment worker_environment) {
@@ -199,9 +211,11 @@ void SchedulerWorkerPoolImpl::Start(
DCHECK(workers_.empty());
- worker_capacity_ = params.max_threads();
- initial_worker_capacity_ = worker_capacity_;
- DCHECK_LE(initial_worker_capacity_, kMaxNumberOfWorkers);
+ max_tasks_ = params.max_tasks();
+ DCHECK_GE(max_tasks_, 1U);
+ initial_max_tasks_ = max_tasks_;
+ DCHECK_LE(initial_max_tasks_, kMaxNumberOfWorkers);
+ max_background_tasks_ = max_background_tasks;
suggested_reclaim_time_ = params.suggested_reclaim_time();
backward_compatibility_ = params.backward_compatibility();
worker_environment_ = worker_environment;
@@ -213,8 +227,8 @@ void SchedulerWorkerPoolImpl::Start(
// The initial number of workers is |num_wake_ups_before_start_| + 1 to try to
// keep at least one standby thread at all times (capacity permitting).
- const int num_initial_workers = std::min(num_wake_ups_before_start_ + 1,
- static_cast<int>(worker_capacity_));
+ const int num_initial_workers =
+ std::min(num_wake_ups_before_start_ + 1, static_cast<int>(max_tasks_));
workers_.reserve(num_initial_workers);
for (int index = 0; index < num_initial_workers; ++index) {
@@ -261,11 +275,11 @@ void SchedulerWorkerPoolImpl::GetHistograms(
int SchedulerWorkerPoolImpl::GetMaxConcurrentNonBlockedTasksDeprecated() const {
#if DCHECK_IS_ON()
AutoSchedulerLock auto_lock(lock_);
- DCHECK_NE(initial_worker_capacity_, 0U)
+ DCHECK_NE(initial_max_tasks_, 0U)
<< "GetMaxConcurrentTasksDeprecated() should only be called after the "
<< "worker pool has started.";
#endif
- return initial_worker_capacity_;
+ return initial_max_tasks_;
}
void SchedulerWorkerPoolImpl::WaitForWorkersIdleForTesting(size_t n) {
@@ -332,9 +346,9 @@ size_t SchedulerWorkerPoolImpl::NumberOfWorkersForTesting() const {
return workers_.size();
}
-size_t SchedulerWorkerPoolImpl::GetWorkerCapacityForTesting() const {
+size_t SchedulerWorkerPoolImpl::GetMaxTasksForTesting() const {
AutoSchedulerLock auto_lock(lock_);
- return worker_capacity_;
+ return max_tasks_;
}
size_t SchedulerWorkerPoolImpl::NumberOfIdleWorkersForTesting() const {
@@ -404,8 +418,9 @@ scoped_refptr<Sequence>
SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
SchedulerWorker* worker) {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
-
DCHECK(!is_running_task_);
+ DCHECK(!is_running_background_task_);
+
{
AutoSchedulerLock auto_lock(outer_->lock_);
@@ -429,9 +444,9 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
}
// Excess workers should not get work, until they are no longer excess (i.e.
- // worker capacity increases or another worker cleans up). This ensures that
- // if we have excess workers in the pool, they get a chance to no longer be
- // excess before being cleaned up.
+ // max tasks increases or another worker cleans up). This ensures that if we
+ // have excess workers in the pool, they get a chance to no longer be excess
+ // before being cleaned up.
if (outer_->NumberOfExcessWorkersLockRequired() >
outer_->idle_workers_stack_.Size()) {
OnWorkerBecomesIdleLockRequired(worker);
@@ -440,11 +455,11 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
}
scoped_refptr<Sequence> sequence;
{
- std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
outer_->shared_priority_queue_.BeginTransaction());
- if (shared_transaction->IsEmpty()) {
- // |shared_transaction| is kept alive while |worker| is added to
+ if (transaction->IsEmpty()) {
+ // |transaction| is kept alive while |worker| is added to
// |idle_workers_stack_| to avoid this race:
// 1. This thread creates a Transaction, finds |shared_priority_queue_|
// empty and ends the Transaction.
@@ -457,11 +472,25 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
// 4. This thread adds itself to |idle_workers_stack_| and goes to sleep.
// No thread runs the Sequence inserted in step 2.
AutoSchedulerLock auto_lock(outer_->lock_);
-
OnWorkerBecomesIdleLockRequired(worker);
return nullptr;
}
- sequence = shared_transaction->PopSequence();
+
+ // Enforce that no more than |max_background_tasks_| background tasks run concurrently.
+ const TaskPriority priority = transaction->PeekSortKey().priority();
+ if (priority == TaskPriority::BACKGROUND) {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ if (outer_->num_running_background_tasks_ <
+ outer_->max_background_tasks_) {
+ ++outer_->num_running_background_tasks_;
+ is_running_background_task_ = true;
+ } else {
+ OnWorkerBecomesIdleLockRequired(worker);
+ return nullptr;
+ }
+ }
+
+ sequence = transaction->PopSequence();
}
DCHECK(sequence);
#if DCHECK_IS_ON()
@@ -477,12 +506,18 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask() {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
-
DCHECK(may_block_start_time_.is_null());
- DCHECK(!incremented_worker_capacity_since_blocked_);
+ DCHECK(!incremented_max_tasks_since_blocked_);
DCHECK(is_running_task_);
+
is_running_task_ = false;
+ if (is_running_background_task_) {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ --outer_->num_running_background_tasks_;
+ is_running_background_task_ = false;
+ }
+
++num_tasks_since_last_wait_;
++num_tasks_since_last_detach_;
}
@@ -609,8 +644,7 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingStarted(
BlockingType blocking_type) {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
- // Blocking calls made outside of tasks should not influence the capacity
- // count as no task is running.
+ // Blocking calls made outside of tasks should not influence the max tasks.
if (!is_running_task_)
return;
@@ -632,8 +666,8 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
AutoSchedulerLock auto_lock(outer_->lock_);
// Don't do anything if a MAY_BLOCK ScopedBlockingCall instantiated in the
- // same scope already caused the worker capacity to be incremented.
- if (incremented_worker_capacity_since_blocked_)
+ // same scope already caused the max tasks to be incremented.
+ if (incremented_max_tasks_since_blocked_)
return;
// Cancel the effect of a MAY_BLOCK ScopedBlockingCall instantiated in the
@@ -641,6 +675,8 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
if (!may_block_start_time_.is_null()) {
may_block_start_time_ = TimeTicks();
--outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
}
}
@@ -655,14 +691,16 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingEnded() {
return;
AutoSchedulerLock auto_lock(outer_->lock_);
- if (incremented_worker_capacity_since_blocked_) {
- outer_->DecrementWorkerCapacityLockRequired();
+ if (incremented_max_tasks_since_blocked_) {
+ outer_->DecrementMaxTasksLockRequired(is_running_background_task_);
} else {
DCHECK(!may_block_start_time_.is_null());
--outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
}
- incremented_worker_capacity_since_blocked_ = false;
+ incremented_max_tasks_since_blocked_ = false;
may_block_start_time_ = TimeTicks();
}
@@ -672,12 +710,14 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::MayBlockEntered() {
{
AutoSchedulerLock auto_lock(outer_->lock_);
- DCHECK(!incremented_worker_capacity_since_blocked_);
+ DCHECK(!incremented_max_tasks_since_blocked_);
DCHECK(may_block_start_time_.is_null());
may_block_start_time_ = TimeTicks::Now();
++outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ ++outer_->num_pending_background_may_block_workers_;
}
- outer_->PostAdjustWorkerCapacityTaskIfNeeded();
+ outer_->ScheduleAdjustMaxTasksIfNeeded();
}
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::WillBlockEntered() {
@@ -685,27 +725,27 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::WillBlockEntered() {
bool wake_up_allowed = false;
{
- std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
outer_->shared_priority_queue_.BeginTransaction());
AutoSchedulerLock auto_lock(outer_->lock_);
- DCHECK(!incremented_worker_capacity_since_blocked_);
+ DCHECK(!incremented_max_tasks_since_blocked_);
DCHECK(may_block_start_time_.is_null());
- incremented_worker_capacity_since_blocked_ = true;
- outer_->IncrementWorkerCapacityLockRequired();
+ incremented_max_tasks_since_blocked_ = true;
+ outer_->IncrementMaxTasksLockRequired(is_running_background_task_);
- // If the number of workers was less than the old worker capacity, PostTask
+ // If the number of workers was less than the old max tasks, PostTask
// would've handled creating extra workers during WakeUpOneWorker.
// Therefore, we don't need to do anything here.
- if (outer_->workers_.size() < outer_->worker_capacity_ - 1)
+ if (outer_->workers_.size() < outer_->max_tasks_ - 1)
return;
- if (shared_transaction->IsEmpty()) {
+ if (transaction->IsEmpty()) {
outer_->MaintainAtLeastOneIdleWorkerLockRequired();
} else {
// TODO(crbug.com/757897): We may create extra workers in this case:
- // |workers.size()| was equal to the old |worker_capacity_|, we had
- // multiple ScopedBlockingCalls in parallel and we had work on the PQ.
+ // |workers.size()| was equal to the old |max_tasks_|, we had multiple
+ // ScopedBlockingCalls in parallel and we had work on the PQ.
wake_up_allowed = outer_->WakeUpOneWorkerLockRequired();
// |wake_up_allowed| is true when the pool is started, and a WILL_BLOCK
// scope cannot be entered before the pool starts.
@@ -715,22 +755,24 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::WillBlockEntered() {
// TODO(crbug.com/813857): This can be better handled in the PostTask()
// codepath. We really only should do this if there are tasks pending.
if (wake_up_allowed)
- outer_->PostAdjustWorkerCapacityTaskIfNeeded();
+ outer_->ScheduleAdjustMaxTasksIfNeeded();
}
bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
- MustIncrementWorkerCapacityLockRequired() {
+ MustIncrementMaxTasksLockRequired() {
outer_->lock_.AssertAcquired();
- if (!incremented_worker_capacity_since_blocked_ &&
+ if (!incremented_max_tasks_since_blocked_ &&
!may_block_start_time_.is_null() &&
TimeTicks::Now() - may_block_start_time_ >= outer_->MayBlockThreshold()) {
- incremented_worker_capacity_since_blocked_ = true;
+ incremented_max_tasks_since_blocked_ = true;
// Reset |may_block_start_time_| so that BlockingScopeExited() knows that it
- // doesn't have to decrement |outer_->num_pending_may_block_workers_|.
+ // doesn't have to decrement the number of pending MAY_BLOCK workers.
may_block_start_time_ = TimeTicks();
--outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
return true;
}
@@ -783,7 +825,7 @@ void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
wake_up_allowed = WakeUpOneWorkerLockRequired();
}
if (wake_up_allowed)
- PostAdjustWorkerCapacityTaskIfNeeded();
+ ScheduleAdjustMaxTasksIfNeeded();
}
void SchedulerWorkerPoolImpl::MaintainAtLeastOneIdleWorkerLockRequired() {
@@ -793,7 +835,7 @@ void SchedulerWorkerPoolImpl::MaintainAtLeastOneIdleWorkerLockRequired() {
return;
DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
- if (idle_workers_stack_.IsEmpty() && workers_.size() < worker_capacity_) {
+ if (idle_workers_stack_.IsEmpty() && workers_.size() < max_tasks_) {
SchedulerWorker* new_worker =
CreateRegisterAndStartSchedulerWorkerLockRequired();
if (new_worker)
@@ -823,7 +865,7 @@ SchedulerWorker*
SchedulerWorkerPoolImpl::CreateRegisterAndStartSchedulerWorkerLockRequired() {
lock_.AssertAcquired();
- DCHECK_LT(workers_.size(), worker_capacity_);
+ DCHECK_LT(workers_.size(), max_tasks_);
DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
// SchedulerWorker needs |lock_| as a predecessor for its thread lock
// because in WakeUpOneWorker, |lock_| is first acquired and then
@@ -838,7 +880,7 @@ SchedulerWorkerPoolImpl::CreateRegisterAndStartSchedulerWorkerLockRequired() {
return nullptr;
workers_.push_back(worker);
- DCHECK_LE(workers_.size(), worker_capacity_);
+ DCHECK_LE(workers_.size(), max_tasks_);
if (!cleanup_timestamps_.empty()) {
detach_duration_histogram_->AddTime(TimeTicks::Now() -
@@ -850,36 +892,38 @@ SchedulerWorkerPoolImpl::CreateRegisterAndStartSchedulerWorkerLockRequired() {
size_t SchedulerWorkerPoolImpl::NumberOfExcessWorkersLockRequired() const {
lock_.AssertAcquired();
- return std::max<int>(0, workers_.size() - worker_capacity_);
+ return std::max<int>(0, workers_.size() - max_tasks_);
}
-void SchedulerWorkerPoolImpl::AdjustWorkerCapacity() {
+void SchedulerWorkerPoolImpl::AdjustMaxTasks() {
DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
- std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
shared_priority_queue_.BeginTransaction());
AutoSchedulerLock auto_lock(lock_);
- const size_t original_worker_capacity = worker_capacity_;
+ const size_t previous_max_tasks = max_tasks_;
- // Increment worker capacity for each worker that has been within a MAY_BLOCK
+ // Increment max tasks for each worker that has been within a MAY_BLOCK
// ScopedBlockingCall for more than MayBlockThreshold().
for (scoped_refptr<SchedulerWorker> worker : workers_) {
// The delegates of workers inside a SchedulerWorkerPoolImpl should be
// SchedulerWorkerDelegateImpls.
SchedulerWorkerDelegateImpl* delegate =
static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate());
- if (delegate->MustIncrementWorkerCapacityLockRequired())
- IncrementWorkerCapacityLockRequired();
+ if (delegate->MustIncrementMaxTasksLockRequired()) {
+ IncrementMaxTasksLockRequired(
+ delegate->is_running_background_task_lock_required());
+ }
}
// Wake up a worker per pending sequence, capacity permitting.
- const size_t num_pending_sequences = shared_transaction->Size();
- const size_t num_wake_ups_needed = std::min(
- worker_capacity_ - original_worker_capacity, num_pending_sequences);
+ const size_t num_pending_sequences = transaction->Size();
+ const size_t num_wake_ups_needed =
+ std::min(max_tasks_ - previous_max_tasks, num_pending_sequences);
for (size_t i = 0; i < num_wake_ups_needed; ++i) {
- // No need to call PostAdjustWorkerCapacityTaskIfNeeded() as the caller will
+ // No need to call ScheduleAdjustMaxTasksIfNeeded() as the caller will
// take care of that for us.
WakeUpOneWorkerLockRequired();
}
@@ -893,73 +937,91 @@ TimeDelta SchedulerWorkerPoolImpl::MayBlockThreshold() const {
// This value was set unscientifically based on intuition and may be adjusted
// in the future. This value is smaller than |kBlockedWorkersPollPeriod|
// because we hope that when multiple workers block around the same time, a
- // single AdjustWorkerCapacity() call will perform all the necessary capacity
+ // single AdjustMaxTasks() call will perform all the necessary max tasks
// adjustments.
return TimeDelta::FromMilliseconds(10);
}
-void SchedulerWorkerPoolImpl::PostAdjustWorkerCapacityTaskIfNeeded() {
+void SchedulerWorkerPoolImpl::ScheduleAdjustMaxTasksIfNeeded() {
{
AutoSchedulerLock auto_lock(lock_);
- if (polling_worker_capacity_ ||
- !ShouldPeriodicallyAdjustWorkerCapacityLockRequired()) {
+ if (polling_max_tasks_ || !ShouldPeriodicallyAdjustMaxTasksLockRequired()) {
return;
}
- polling_worker_capacity_ = true;
+ polling_max_tasks_ = true;
}
service_thread_task_runner_->PostDelayedTask(
FROM_HERE,
- BindOnce(&SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction,
+ BindOnce(&SchedulerWorkerPoolImpl::AdjustMaxTasksFunction,
Unretained(this)),
kBlockedWorkersPollPeriod);
}
-void SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction() {
+void SchedulerWorkerPoolImpl::AdjustMaxTasksFunction() {
DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
- AdjustWorkerCapacity();
+ AdjustMaxTasks();
{
AutoSchedulerLock auto_lock(lock_);
- DCHECK(polling_worker_capacity_);
+ DCHECK(polling_max_tasks_);
- if (!ShouldPeriodicallyAdjustWorkerCapacityLockRequired()) {
- polling_worker_capacity_ = false;
+ if (!ShouldPeriodicallyAdjustMaxTasksLockRequired()) {
+ polling_max_tasks_ = false;
return;
}
}
service_thread_task_runner_->PostDelayedTask(
FROM_HERE,
- BindOnce(&SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction,
+ BindOnce(&SchedulerWorkerPoolImpl::AdjustMaxTasksFunction,
Unretained(this)),
kBlockedWorkersPollPeriod);
}
-bool SchedulerWorkerPoolImpl::
- ShouldPeriodicallyAdjustWorkerCapacityLockRequired() {
+bool SchedulerWorkerPoolImpl::ShouldPeriodicallyAdjustMaxTasksLockRequired() {
lock_.AssertAcquired();
- // AdjustWorkerCapacity() must be periodically called when (1) there are no
- // idle workers that can do work (2) there are workers that are within the
- // scope of a MAY_BLOCK ScopedBlockingCall but haven't cause a capacity
- // increment yet.
- //
+
+ // The maximum number of background tasks that can run concurrently must be
+ // adjusted periodically when (1) the number of background tasks that are
+ // currently running is equal to it and (2) there are workers running
+ // background tasks within the scope of a MAY_BLOCK ScopedBlockingCall but
+ // haven't caused a max background tasks increment yet.
+ // - When (1) is false: A newly posted background task will be allowed to run
+ // normally. There is no hurry to increase max background tasks.
+ // - When (2) is false: AdjustMaxTasks() wouldn't affect
+ // |max_background_tasks_|.
+ if (num_running_background_tasks_ >= max_background_tasks_ &&
+ num_pending_background_may_block_workers_ > 0) {
+ return true;
+ }
+
+ // The maximum number of tasks that can run concurrently must be adjusted
+ // periodically when (1) there are no idle workers that can do work (2) there
+ // are workers that are within the scope of a MAY_BLOCK ScopedBlockingCall but
+ // haven't caused a max tasks increment yet.
// - When (1) is false: A newly posted task will run on one of the idle
- // workers that are allowed to do work. There is no hurry to increase
- // capacity.
- // - When (2) is false: AdjustWorkerCapacity() would be a no-op.
+ // workers that are allowed to do work. There is no hurry to increase max
+ // tasks.
+ // - When (2) is false: AdjustMaxTasks() wouldn't affect |max_tasks_|.
const int idle_workers_that_can_do_work =
idle_workers_stack_.Size() - NumberOfExcessWorkersLockRequired();
return idle_workers_that_can_do_work <= 0 &&
num_pending_may_block_workers_ > 0;
}
-void SchedulerWorkerPoolImpl::DecrementWorkerCapacityLockRequired() {
+void SchedulerWorkerPoolImpl::DecrementMaxTasksLockRequired(
+ bool is_running_background_task) {
lock_.AssertAcquired();
- --worker_capacity_;
+ --max_tasks_;
+ if (is_running_background_task)
+ --max_background_tasks_;
}
-void SchedulerWorkerPoolImpl::IncrementWorkerCapacityLockRequired() {
+void SchedulerWorkerPoolImpl::IncrementMaxTasksLockRequired(
+ bool is_running_background_task) {
lock_.AssertAcquired();
- ++worker_capacity_;
+ ++max_tasks_;
+ if (is_running_background_task)
+ ++max_background_tasks_;
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index eb66b2fb05c..a641cb39a5f 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -76,13 +76,16 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
DelayedTaskManager* delayed_task_manager);
// Creates workers following the |params| specification, allowing existing and
- // future tasks to run. Uses |service_thread_task_runner| to monitor for
- // blocked threads in the pool. If specified, |scheduler_worker_observer| will
- // be notified when a worker enters and exits its main function. It must not
- // be destroyed before JoinForTesting() has returned (must never be destroyed
- // in production). |worker_environment| specifies any requested environment to
- // execute the tasks. Can only be called once. CHECKs on failure.
+ // future tasks to run. The pool will run at most |max_background_tasks|
+ // unblocked TaskPriority::BACKGROUND tasks concurrently. Uses
+ // |service_thread_task_runner| to monitor for blocked threads in the pool. If
+ // specified, |scheduler_worker_observer| will be notified when a worker
+ // enters and exits its main function. It must not be destroyed before
+ // JoinForTesting() has returned (must never be destroyed in production).
+ // |worker_environment| specifies any requested environment to execute the
+ // tasks. Can only be called once. CHECKs on failure.
void Start(const SchedulerWorkerPoolParams& params,
+ int max_background_tasks,
scoped_refptr<TaskRunner> service_thread_task_runner,
SchedulerWorkerObserver* scheduler_worker_observer,
WorkerEnvironment worker_environment);
@@ -129,8 +132,8 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Returns the number of workers in this worker pool.
size_t NumberOfWorkersForTesting() const;
- // Returns |worker_capacity_|.
- size_t GetWorkerCapacityForTesting() const;
+ // Returns |max_tasks_|.
+ size_t GetMaxTasksForTesting() const;
// Returns the number of workers that are idle (i.e. not running tasks).
size_t NumberOfIdleWorkersForTesting() const;
@@ -146,9 +149,9 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
friend class TaskSchedulerWorkerPoolBlockingTest;
friend class TaskSchedulerWorkerPoolMayBlockTest;
- // The period between calls to AdjustWorkerCapacity() when the pool is at
- // capacity. This value was set unscientifically based on intuition and may be
- // adjusted in the future.
+ // The period between calls to AdjustMaxTasks() when the pool is at capacity.
+ // This value was set unscientifically based on intuition and may be adjusted
+ // in the future.
static constexpr TimeDelta kBlockedWorkersPollPeriod =
TimeDelta::FromMilliseconds(50);
@@ -167,7 +170,7 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// permitted.
bool WakeUpOneWorkerLockRequired();
- // Adds a worker, if needed, to maintain one idle worker, |worker_capacity_|
+ // Adds a worker, if needed, to maintain one idle worker, |max_tasks_|
// permitting.
void MaintainAtLeastOneIdleWorkerLockRequired();
@@ -186,32 +189,35 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
SchedulerWorker* CreateRegisterAndStartSchedulerWorkerLockRequired();
// Returns the number of workers in the pool that should not run tasks due to
- // the pool being over worker capacity.
+ // the pool being over capacity.
size_t NumberOfExcessWorkersLockRequired() const;
- // Examines the list of SchedulerWorkers and increments |worker_capacity_| for
- // each worker that has been within the scope of a MAY_BLOCK
- // ScopedBlockingCall for more than BlockedThreshold().
- void AdjustWorkerCapacity();
+ // Examines the list of SchedulerWorkers and increments |max_tasks_| for each
+ // worker that has been within the scope of a MAY_BLOCK ScopedBlockingCall for
+ // more than MayBlockThreshold().
+ void AdjustMaxTasks();
- // Returns the threshold after which the worker capacity is increased to
- // compensate for a worker that is within a MAY_BLOCK ScopedBlockingCall.
+ // Returns the threshold after which the max tasks is increased to compensate
+ // for a worker that is within a MAY_BLOCK ScopedBlockingCall.
TimeDelta MayBlockThreshold() const;
- // Starts calling AdjustWorkerCapacity() periodically on
+ // Starts calling AdjustMaxTasks() periodically on
// |service_thread_task_runner_| if not already requested.
- void PostAdjustWorkerCapacityTaskIfNeeded();
+ void ScheduleAdjustMaxTasksIfNeeded();
- // Calls AdjustWorkerCapacity() and schedules it again as necessary. May only
- // be called from the service thread.
- void AdjustWorkerCapacityTaskFunction();
+ // Calls AdjustMaxTasks() and schedules it again as necessary. May only be
+ // called from the service thread.
+ void AdjustMaxTasksFunction();
- // Returns true if AdjustWorkerCapacity() should periodically be called on
+ // Returns true if AdjustMaxTasks() should periodically be called on
// |service_thread_task_runner_|.
- bool ShouldPeriodicallyAdjustWorkerCapacityLockRequired();
+ bool ShouldPeriodicallyAdjustMaxTasksLockRequired();
- void DecrementWorkerCapacityLockRequired();
- void IncrementWorkerCapacityLockRequired();
+ // Increments/decrements the number of tasks that can run in this pool.
+ // |is_running_background_task| indicates whether the worker causing the
+ // change is currently running a TaskPriority::BACKGROUND task.
+ void DecrementMaxTasksLockRequired(bool is_running_background_task);
+ void IncrementMaxTasksLockRequired(bool is_running_background_task);
const std::string pool_label_;
const ThreadPriority priority_hint_;
@@ -225,15 +231,15 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
SchedulerBackwardCompatibility backward_compatibility_;
- // Synchronizes accesses to |workers_|, |worker_capacity_|,
- // |num_pending_may_block_workers_|, |idle_workers_stack_|,
- // |idle_workers_stack_cv_for_testing_|, |num_wake_ups_before_start_|,
- // |cleanup_timestamps_|, |polling_worker_capacity_|,
+ // Synchronizes accesses to |workers_|, |max_tasks_|, |max_background_tasks_|,
+ // |num_running_background_tasks_|, |num_pending_may_block_workers_|,
+ // |idle_workers_stack_|, |idle_workers_stack_cv_for_testing_|,
+ // |num_wake_ups_before_start_|, |cleanup_timestamps_|, |polling_max_tasks_|,
// |worker_cleanup_disallowed_for_testing_|,
// |num_workers_cleaned_up_for_testing_|,
// |SchedulerWorkerDelegateImpl::is_on_idle_workers_stack_|,
- // |SchedulerWorkerDelegateImpl::incremented_worker_capacity_since_blocked_|
- // and |SchedulerWorkerDelegateImpl::may_block_start_time_|. Has
+ // |SchedulerWorkerDelegateImpl::incremented_max_tasks_since_blocked_| and
+ // |SchedulerWorkerDelegateImpl::may_block_start_time_|. Has
// |shared_priority_queue_|'s lock as its predecessor so that a worker can be
// pushed to |idle_workers_stack_| within the scope of a Transaction (more
// details in GetWork()).
@@ -242,17 +248,29 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// All workers owned by this worker pool.
std::vector<scoped_refptr<SchedulerWorker>> workers_;
- // Workers can be added as needed up until there are |worker_capacity_|
- // workers.
- size_t worker_capacity_ = 0;
+ // The maximum number of tasks that can run concurrently in this pool. Workers
+ // can be added as needed up until there are |max_tasks_| workers.
+ size_t max_tasks_ = 0;
- // Initial value of |worker_capacity_| as set in Start().
- size_t initial_worker_capacity_ = 0;
+ // Initial value of |max_tasks_| as set in Start().
+ size_t initial_max_tasks_ = 0;
- // Number workers that are within the scope of a MAY_BLOCK ScopedBlockingCall
- // but haven't caused a worker capacity increase yet.
+ // The maximum number of background tasks that can run concurrently in this
+ // pool.
+ int max_background_tasks_ = 0;
+
+ // The number of background tasks that are currently running in this pool.
+ int num_running_background_tasks_ = 0;
+
+ // Number of workers that are within the scope of a MAY_BLOCK
+ // ScopedBlockingCall but haven't caused a max task increase yet.
int num_pending_may_block_workers_ = 0;
+ // Number of workers that are running a TaskPriority::BACKGROUND task and are
+ // within the scope of a MAY_BLOCK ScopedBlockingCall but haven't caused a max
+ // task increase yet.
+ int num_pending_background_may_block_workers_ = 0;
+
// Environment to be initialized per worker.
WorkerEnvironment worker_environment_ = WorkerEnvironment::NONE;
@@ -274,9 +292,8 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Timestamps get popped off the stack as new workers are added.
base::stack<TimeTicks, std::vector<TimeTicks>> cleanup_timestamps_;
- // Whether we are currently polling for necessary adjustments to
- // |worker_capacity_|.
- bool polling_worker_capacity_ = false;
+ // Whether we are currently polling for necessary adjustments to |max_tasks_|.
+ bool polling_max_tasks_ = false;
// Indicates to the delegates that workers are not permitted to cleanup.
bool worker_cleanup_disallowed_for_testing_ = false;
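
Start() now takes the additional |max_background_tasks| argument between the pool params and the service thread task runner. A hypothetical call site (variable names are illustrative; the argument order mirrors the test fixture change below):

  worker_pool->Start(
      SchedulerWorkerPoolParams(max_tasks, TimeDelta::Max()),
      max_background_tasks, service_thread_task_runner,
      nullptr /* scheduler_worker_observer */,
      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
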
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index 5f099b3904b..f5101947704 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -21,6 +21,7 @@
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"
+#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
@@ -32,6 +33,7 @@
#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/test_task_factory.h"
#include "base/task_scheduler/test_utils.h"
+#include "base/test/bind_test_util.h"
#include "base/test/gtest_util.h"
#include "base/test/test_simple_task_runner.h"
#include "base/test/test_timeouts.h"
@@ -43,6 +45,7 @@
#include "base/threading/thread_local_storage.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
+#include "base/timer/timer.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -54,7 +57,7 @@ namespace base {
namespace internal {
namespace {
-constexpr size_t kNumWorkersInWorkerPool = 4;
+constexpr size_t kMaxTasks = 4;
constexpr size_t kNumThreadsPostingTasks = 4;
constexpr size_t kNumTasksPostedPerThread = 150;
// This can't be lower because Windows' WaitableEvent wakes up too early when a
@@ -64,9 +67,10 @@ constexpr TimeDelta kReclaimTimeForCleanupTests =
TimeDelta::FromMilliseconds(500);
// Waits on |event| in a scope where the blocking observer is null, to avoid
-// affecting the worker capacity.
+// affecting the max tasks.
void WaitWithoutBlockingObserver(WaitableEvent* event) {
internal::ScopedClearBlockingObserverForTesting clear_blocking_observer;
+ ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
event->Wait();
}
@@ -75,15 +79,15 @@ class TaskSchedulerWorkerPoolImplTestBase {
TaskSchedulerWorkerPoolImplTestBase()
: service_thread_("TaskSchedulerServiceThread"){};
- void CommonSetUp() {
- CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+ void CommonSetUp(TimeDelta suggested_reclaim_time = TimeDelta::Max()) {
+ CreateAndStartWorkerPool(suggested_reclaim_time, kMaxTasks);
}
void CommonTearDown() {
service_thread_.Stop();
task_tracker_.FlushForTesting();
- worker_pool_->WaitForAllWorkersIdleForTesting();
- worker_pool_->JoinForTesting();
+ if (worker_pool_)
+ worker_pool_->JoinForTesting();
}
void CreateWorkerPool() {
@@ -97,18 +101,18 @@ class TaskSchedulerWorkerPoolImplTestBase {
}
virtual void StartWorkerPool(TimeDelta suggested_reclaim_time,
- size_t num_workers) {
+ size_t max_tasks) {
ASSERT_TRUE(worker_pool_);
worker_pool_->Start(
- SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
+ SchedulerWorkerPoolParams(max_tasks, suggested_reclaim_time), max_tasks,
service_thread_.task_runner(), nullptr,
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
}
void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time,
- size_t num_workers) {
+ size_t max_tasks) {
CreateWorkerPool();
- StartWorkerPool(suggested_reclaim_time, num_workers);
+ StartWorkerPool(suggested_reclaim_time, max_tasks);
}
Thread service_thread_;
@@ -219,10 +223,9 @@ TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWithOneAvailableWorker) {
// Post blocking tasks to keep all workers busy except one until |event| is
// signaled. Use different factories so that tasks are added to different
// sequences and can run simultaneously when the execution mode is SEQUENCED.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent event;
std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
- for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
+ for (size_t i = 0; i < (kMaxTasks - 1); ++i) {
blocked_task_factories.push_back(std::make_unique<test::TestTaskFactory>(
CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
GetParam()));
@@ -250,14 +253,13 @@ TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWithOneAvailableWorker) {
}
TEST_P(TaskSchedulerWorkerPoolImplTestParam, Saturate) {
- // Verify that it is possible to have |kNumWorkersInWorkerPool|
- // tasks/sequences running simultaneously. Use different factories so that the
- // blocking tasks are added to different sequences and can run simultaneously
- // when the execution mode is SEQUENCED.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ // Verify that it is possible to have |kMaxTasks| tasks/sequences running
+ // simultaneously. Use different factories so that the blocking tasks are
+ // added to different sequences and can run simultaneously when the execution
+ // mode is SEQUENCED.
+ WaitableEvent event;
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
factories.push_back(std::make_unique<test::TestTaskFactory>(
CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
GetParam()));
@@ -282,8 +284,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTestParam, NoEnvironment) {
scoped_refptr<TaskRunner> task_runner =
CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
task_runner->PostTask(
FROM_HERE, BindOnce(
[](WaitableEvent* task_running) {
@@ -323,10 +324,10 @@ class TaskSchedulerWorkerPoolImplTestCOMMTAParam
private:
void StartWorkerPool(TimeDelta suggested_reclaim_time,
- size_t num_workers) override {
+ size_t max_tasks) override {
ASSERT_TRUE(worker_pool_);
worker_pool_->Start(
- SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
+ SchedulerWorkerPoolParams(max_tasks, suggested_reclaim_time), max_tasks,
service_thread_.task_runner(), nullptr,
SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA);
}
@@ -341,8 +342,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTestCOMMTAParam, COMMTAInitialized) {
scoped_refptr<TaskRunner> task_runner =
CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
task_runner->PostTask(
FROM_HERE, BindOnce(
[](WaitableEvent* task_running) {
@@ -367,7 +367,7 @@ INSTANTIATE_TEST_CASE_P(Sequenced,
namespace {
-class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
+class TaskSchedulerWorkerPoolImplStartInBodyTest
: public TaskSchedulerWorkerPoolImplTest {
public:
void SetUp() override {
@@ -388,21 +388,17 @@ void TaskPostedBeforeStart(PlatformThreadRef* platform_thread_ref,
// Verify that 2 tasks posted before Start() to a SchedulerWorkerPoolImpl with
// more than 2 workers run on different workers when Start() is called.
-TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
- PostTasksBeforeStart) {
+TEST_F(TaskSchedulerWorkerPoolImplStartInBodyTest, PostTasksBeforeStart) {
PlatformThreadRef task_1_thread_ref;
PlatformThreadRef task_2_thread_ref;
- WaitableEvent task_1_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent task_2_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_1_running;
+ WaitableEvent task_2_running;
// This event is used to prevent a task from completing before the other task
// starts running. If that happened, both tasks could run on the same worker
// and this test couldn't verify that the correct number of workers were woken
// up.
- WaitableEvent barrier(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent barrier;
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
->PostTask(
@@ -421,7 +417,7 @@ TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
EXPECT_FALSE(task_1_running.IsSignaled());
EXPECT_FALSE(task_2_running.IsSignaled());
- StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+ StartWorkerPool(TimeDelta::Max(), kMaxTasks);
// Tasks should run shortly after the pool is started.
task_1_running.Wait();
@@ -435,23 +431,22 @@ TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
}
// Verify that posting many tasks before Start will cause the number of workers
-// to grow to |worker_capacity_| during Start.
-TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest, PostManyTasks) {
+// to grow to |max_tasks_| during Start.
+TEST_F(TaskSchedulerWorkerPoolImplStartInBodyTest, PostManyTasks) {
scoped_refptr<TaskRunner> task_runner =
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- constexpr size_t kNumTasksPosted = 2 * kNumWorkersInWorkerPool;
+ constexpr size_t kNumTasksPosted = 2 * kMaxTasks;
for (size_t i = 0; i < kNumTasksPosted; ++i)
task_runner->PostTask(FROM_HERE, DoNothing());
EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());
- StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
- ASSERT_GT(kNumTasksPosted, worker_pool_->GetWorkerCapacityForTesting());
- EXPECT_EQ(kNumWorkersInWorkerPool,
- worker_pool_->GetWorkerCapacityForTesting());
+ StartWorkerPool(TimeDelta::Max(), kMaxTasks);
+ ASSERT_GT(kNumTasksPosted, worker_pool_->GetMaxTasksForTesting());
+ EXPECT_EQ(kMaxTasks, worker_pool_->GetMaxTasksForTesting());
EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
- worker_pool_->GetWorkerCapacityForTesting());
+ worker_pool_->GetMaxTasksForTesting());
}
namespace {
@@ -475,13 +470,10 @@ class TaskSchedulerWorkerPoolCheckTlsReuse
}
protected:
- TaskSchedulerWorkerPoolCheckTlsReuse() :
- waiter_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ TaskSchedulerWorkerPoolCheckTlsReuse() = default;
void SetUp() override {
- CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
- kNumWorkersInWorkerPool);
+ CreateAndStartWorkerPool(kReclaimTimeForCleanupTests, kMaxTasks);
}
subtle::Atomic32 zero_tls_values_ = 0;
@@ -500,7 +492,7 @@ class TaskSchedulerWorkerPoolCheckTlsReuse
TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckCleanupWorkers) {
// Saturate the workers and mark each worker's thread with a magic TLS value.
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
factories.push_back(std::make_unique<test::TestTaskFactory>(
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}),
test::ExecutionMode::PARALLEL));
@@ -525,9 +517,7 @@ TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckCleanupWorkers) {
// If the value is not there, that means we're at a new worker.
std::vector<std::unique_ptr<WaitableEvent>> count_waiters;
for (auto& factory : factories) {
- count_waiters.push_back(WrapUnique(new WaitableEvent(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED)));
+ count_waiters.push_back(std::make_unique<WaitableEvent>());
ASSERT_TRUE(factory->PostTask(
PostNestedTask::NO,
Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::CountZeroTlsValuesAndWait,
@@ -569,14 +559,13 @@ class TaskSchedulerWorkerPoolHistogramTest
auto task_runner =
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- const auto pool_capacity = worker_pool_->GetWorkerCapacityForTesting();
+ const auto max_tasks = worker_pool_->GetMaxTasksForTesting();
- WaitableEvent workers_flooded(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent workers_flooded;
RepeatingClosure all_workers_running_barrier = BarrierClosure(
- pool_capacity,
+ max_tasks,
BindOnce(&WaitableEvent::Signal, Unretained(&workers_flooded)));
- for (size_t i = 0; i < pool_capacity; ++i) {
+ for (size_t i = 0; i < max_tasks; ++i) {
task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -599,9 +588,8 @@ class TaskSchedulerWorkerPoolHistogramTest
} // namespace
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+ WaitableEvent event;
+ CreateAndStartWorkerPool(TimeDelta::Max(), kMaxTasks);
auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
{WithBaseSyncPrimitives()});
@@ -636,13 +624,10 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
// idle and cleanup periods.
TEST_F(TaskSchedulerWorkerPoolHistogramTest,
NumTasksBetweenWaitsWithIdlePeriodAndCleanup) {
- WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
- kNumWorkersInWorkerPool);
+ WaitableEvent tasks_can_exit_event;
+ CreateAndStartWorkerPool(kReclaimTimeForCleanupTests, kMaxTasks);
- WaitableEvent workers_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent workers_continue;
FloodPool(&workers_continue);
@@ -661,14 +646,14 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest,
// histogram being reported when going idle and each worker having processed
// precisely 1 task per the controlled flooding logic above.
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
- EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+ EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
- worker_pool_->WaitForWorkersCleanedUpForTesting(kNumWorkersInWorkerPool - 1);
+ worker_pool_->WaitForWorkersCleanedUpForTesting(kMaxTasks - 1);
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
- EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+ EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
@@ -679,7 +664,7 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest,
FloodPool(&workers_continue);
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
- EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+ EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
@@ -712,12 +697,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
},
Unretained(&thread_ref)));
- WaitableEvent cleanup_thread_running(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent cleanup_thread_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent cleanup_thread_running;
+ WaitableEvent cleanup_thread_continue;
histogrammed_thread_task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -758,12 +739,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
// release and go idle first and then |task_runner_for_top_idle| should
// release and go idle. This allows the SchedulerWorker associated with
// |histogrammed_thread_task_runner| to cleanup.
- WaitableEvent top_idle_thread_running(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent top_idle_thread_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent top_idle_thread_running;
+ WaitableEvent top_idle_thread_continue;
auto task_runner_for_top_idle =
worker_pool_->CreateSequencedTaskRunnerWithTraits(
{WithBaseSyncPrimitives()});
@@ -803,78 +780,163 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
-TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
- TaskTracker task_tracker("Test");
- DelayedTaskManager delayed_task_manager;
- scoped_refptr<TaskRunner> service_thread_task_runner =
- MakeRefCounted<TestSimpleTaskRunner>();
- delayed_task_manager.Start(service_thread_task_runner);
- auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
- "OnePolicyWorkerPool", "A", ThreadPriority::NORMAL,
- task_tracker.GetTrackedRef(), &delayed_task_manager);
- worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()),
- service_thread_task_runner, nullptr,
- SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
- ASSERT_TRUE(worker_pool);
- EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
- worker_pool->JoinForTesting();
-}
+namespace {
-// Verify the SchedulerWorkerPoolImpl keeps at least one idle standby thread,
-// capacity permitting.
-TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
- constexpr size_t kWorkerCapacity = 3;
+class TaskSchedulerWorkerPoolStandbyPolicyTest
+ : public TaskSchedulerWorkerPoolImplTestBase,
+ public testing::Test {
+ public:
+ TaskSchedulerWorkerPoolStandbyPolicyTest() = default;
- TaskTracker task_tracker("Test");
- DelayedTaskManager delayed_task_manager;
- scoped_refptr<TaskRunner> service_thread_task_runner =
- MakeRefCounted<TestSimpleTaskRunner>();
- delayed_task_manager.Start(service_thread_task_runner);
- auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
- "StandbyThreadWorkerPool", "A", ThreadPriority::NORMAL,
- task_tracker.GetTrackedRef(), &delayed_task_manager);
- worker_pool->Start(
- SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
- service_thread_task_runner, nullptr,
- SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
- ASSERT_TRUE(worker_pool);
- EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+ void SetUp() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(
+ kReclaimTimeForCleanupTests);
+ }
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolStandbyPolicyTest);
+};
+
+} // namespace
+
+TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
+ EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
+}
+
+// Verify that the SchedulerWorkerPoolImpl keeps at least one idle standby
+// thread, capacity permitting.
+TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
auto task_runner =
- worker_pool->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+ worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
+ WaitableEvent threads_continue;
- RepeatingClosure closure = BindRepeating(
- [](WaitableEvent* thread_running, WaitableEvent* thread_continue) {
- thread_running->Signal();
- WaitWithoutBlockingObserver(thread_continue);
- },
- Unretained(&thread_running), Unretained(&thread_continue));
+ RepeatingClosure thread_blocker = BindLambdaForTesting([&]() {
+ thread_running.Signal();
+ WaitWithoutBlockingObserver(&threads_continue);
+ });
- // There should be one idle thread until we reach worker capacity
- for (size_t i = 0; i < kWorkerCapacity; ++i) {
- EXPECT_EQ(i + 1, worker_pool->NumberOfWorkersForTesting());
- task_runner->PostTask(FROM_HERE, closure);
+ // There should be one idle thread until we reach capacity
+ for (size_t i = 0; i < kMaxTasks; ++i) {
+ EXPECT_EQ(i + 1, worker_pool_->NumberOfWorkersForTesting());
+ task_runner->PostTask(FROM_HERE, thread_blocker);
thread_running.Wait();
}
// There should not be an extra idle thread if it means going above capacity
- EXPECT_EQ(kWorkerCapacity, worker_pool->NumberOfWorkersForTesting());
+ EXPECT_EQ(kMaxTasks, worker_pool_->NumberOfWorkersForTesting());
- thread_continue.Signal();
+ threads_continue.Signal();
// Wait long enough for all but one worker to clean up.
- worker_pool->WaitForWorkersCleanedUpForTesting(kWorkerCapacity - 1);
- EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+ worker_pool_->WaitForWorkersCleanedUpForTesting(kMaxTasks - 1);
+ EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
  // Give extra time for a worker to clean up: none should, as the pool is
  // expected to keep a worker ready regardless of how long it was idle for.
PlatformThread::Sleep(kReclaimTimeForCleanupTests);
- EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+ EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
+}
- worker_pool->JoinForTesting();
+// Verify that being "the" idle thread counts as being active (i.e. won't be
+// reclaimed even if not on top of the idle stack when reclaim timeout expires).
+// Regression test for https://crbug.com/847501.
+TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest,
+ InAndOutStandbyThreadIsActive) {
+ auto sequenced_task_runner =
+ worker_pool_->CreateSequencedTaskRunnerWithTraits({});
+
+ WaitableEvent timer_started;
+
+ RepeatingTimer recurring_task;
+ sequenced_task_runner->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ recurring_task.Start(FROM_HERE, kReclaimTimeForCleanupTests / 2,
+ DoNothing());
+ timer_started.Signal();
+ }));
+
+ timer_started.Wait();
+
+ // Running a task should have brought up a new standby thread.
+ EXPECT_EQ(2U, worker_pool_->NumberOfWorkersForTesting());
+
+  // Give extra time for a worker to clean up: none should, as the two workers
+  // are both considered "active" because the timer ticks faster than the
+  // reclaim timeout.
+ PlatformThread::Sleep(kReclaimTimeForCleanupTests * 2);
+ EXPECT_EQ(2U, worker_pool_->NumberOfWorkersForTesting());
+
+ sequenced_task_runner->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { recurring_task.Stop(); }));
+
+ // Stopping the recurring task should let the second worker be reclaimed per
+ // not being "the" standby thread for a full reclaim timeout.
+ worker_pool_->WaitForWorkersCleanedUpForTesting(1);
+ EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
+}
+
+// Verify that being "the" idle thread counts as being active but isn't sticky.
+// Regression test for https://crbug.com/847501.
+TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest, OnlyKeepActiveStandbyThreads) {
+ auto sequenced_task_runner =
+ worker_pool_->CreateSequencedTaskRunnerWithTraits({});
+
+ // Start this test like
+ // TaskSchedulerWorkerPoolStandbyPolicyTest.InAndOutStandbyThreadIsActive and
+ // give it some time to stabilize.
+ RepeatingTimer recurring_task;
+ sequenced_task_runner->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ recurring_task.Start(FROM_HERE, kReclaimTimeForCleanupTests / 2,
+ DoNothing());
+ }));
+
+ PlatformThread::Sleep(kReclaimTimeForCleanupTests * 2);
+ EXPECT_EQ(2U, worker_pool_->NumberOfWorkersForTesting());
+
+ // Then also flood the pool (cycling the top of the idle stack).
+ {
+ auto task_runner =
+ worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+ WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
+ WaitableEvent threads_continue;
+
+ RepeatingClosure thread_blocker = BindLambdaForTesting([&]() {
+ thread_running.Signal();
+ WaitWithoutBlockingObserver(&threads_continue);
+ });
+
+ for (size_t i = 0; i < kMaxTasks; ++i) {
+ task_runner->PostTask(FROM_HERE, thread_blocker);
+ thread_running.Wait();
+ }
+
+ EXPECT_EQ(kMaxTasks, worker_pool_->NumberOfWorkersForTesting());
+ threads_continue.Signal();
+
+ // Flush to ensure all references to |threads_continue| are gone before it
+ // goes out of scope.
+ task_tracker_.FlushForTesting();
+ }
+
+ // All workers should clean up but two (since the timer is still running).
+ worker_pool_->WaitForWorkersCleanedUpForTesting(kMaxTasks - 2);
+ EXPECT_EQ(2U, worker_pool_->NumberOfWorkersForTesting());
+
+ // Extra time shouldn't change this.
+ PlatformThread::Sleep(kReclaimTimeForCleanupTests * 2);
+ EXPECT_EQ(2U, worker_pool_->NumberOfWorkersForTesting());
+
+ // Stopping the timer should let the number of active threads go down to one.
+ sequenced_task_runner->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { recurring_task.Stop(); }));
+ worker_pool_->WaitForWorkersCleanedUpForTesting(1);
+ EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
}
namespace {
@@ -922,11 +984,7 @@ class TaskSchedulerWorkerPoolBlockingTest
: public TaskSchedulerWorkerPoolImplTestBase,
public testing::TestWithParam<NestedBlockingType> {
public:
- TaskSchedulerWorkerPoolBlockingTest()
- : blocking_thread_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED),
- blocking_thread_continue_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ TaskSchedulerWorkerPoolBlockingTest() = default;
static std::string ParamInfoToString(
::testing::TestParamInfo<NestedBlockingType> param_info) {
@@ -955,130 +1013,119 @@ class TaskSchedulerWorkerPoolBlockingTest
// unblocked, then exits.
void SaturateWithBlockingTasks(
const NestedBlockingType& nested_blocking_type) {
- RepeatingClosure blocking_thread_running_closure =
- BarrierClosure(kNumWorkersInWorkerPool,
- BindOnce(&WaitableEvent::Signal,
- Unretained(&blocking_thread_running_)));
+ ASSERT_FALSE(blocking_threads_running_.IsSignaled());
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ RepeatingClosure blocking_threads_running_closure = BarrierClosure(
+ kMaxTasks, BindOnce(&WaitableEvent::Signal,
+ Unretained(&blocking_threads_running_)));
+
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(
FROM_HERE,
BindOnce(
- [](Closure* blocking_thread_running_closure,
- WaitableEvent* blocking_thread_continue_,
+ [](Closure* blocking_threads_running_closure,
+ WaitableEvent* blocking_threads_continue_,
const NestedBlockingType& nested_blocking_type) {
NestedScopedBlockingCall nested_scoped_blocking_call(
nested_blocking_type);
- blocking_thread_running_closure->Run();
- WaitWithoutBlockingObserver(blocking_thread_continue_);
+ blocking_threads_running_closure->Run();
+ WaitWithoutBlockingObserver(blocking_threads_continue_);
},
- Unretained(&blocking_thread_running_closure),
- Unretained(&blocking_thread_continue_), nested_blocking_type));
+ Unretained(&blocking_threads_running_closure),
+ Unretained(&blocking_threads_continue_), nested_blocking_type));
}
- blocking_thread_running_.Wait();
+ blocking_threads_running_.Wait();
}
- // Returns how long we can expect a change to |worker_capacity_| to occur
+ // Returns how long we can expect a change to |max_tasks_| to occur
// after a task has become blocked.
- TimeDelta GetWorkerCapacityChangeSleepTime() {
+ TimeDelta GetMaxTasksChangeSleepTime() {
return std::max(SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod,
worker_pool_->MayBlockThreshold()) +
TestTimeouts::tiny_timeout();
}
- // Waits indefinitely, until |worker_pool_|'s worker capacity increases to
- // |expected_worker_capacity|.
- void ExpectWorkerCapacityIncreasesTo(size_t expected_worker_capacity) {
- size_t capacity = worker_pool_->GetWorkerCapacityForTesting();
- while (capacity != expected_worker_capacity) {
- PlatformThread::Sleep(GetWorkerCapacityChangeSleepTime());
- size_t new_capacity = worker_pool_->GetWorkerCapacityForTesting();
- ASSERT_GE(new_capacity, capacity);
- capacity = new_capacity;
+ // Waits indefinitely, until |worker_pool_|'s max tasks increases to
+ // |expected_max_tasks|.
+ void ExpectMaxTasksIncreasesTo(size_t expected_max_tasks) {
+ size_t max_tasks = worker_pool_->GetMaxTasksForTesting();
+ while (max_tasks != expected_max_tasks) {
+ PlatformThread::Sleep(GetMaxTasksChangeSleepTime());
+ size_t new_max_tasks = worker_pool_->GetMaxTasksForTesting();
+ ASSERT_GE(new_max_tasks, max_tasks);
+ max_tasks = new_max_tasks;
}
}
// Unblocks tasks posted by SaturateWithBlockingTasks().
- void UnblockTasks() { blocking_thread_continue_.Signal(); }
+ void UnblockTasks() { blocking_threads_continue_.Signal(); }
scoped_refptr<TaskRunner> task_runner_;
private:
- WaitableEvent blocking_thread_running_;
- WaitableEvent blocking_thread_continue_;
+ WaitableEvent blocking_threads_running_;
+ WaitableEvent blocking_threads_continue_;
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolBlockingTest);
};
-// Verify that BlockingScopeEntered() causes worker capacity to increase and
-// creates a worker if needed. Also verify that BlockingScopeExited() decreases
-// worker capacity after an increase.
+// Verify that BlockingScopeEntered() causes max tasks to increase and creates a
+// worker if needed. Also verify that BlockingScopeExited() decreases max tasks
+// after an increase.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockedUnblocked) {
- ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ ASSERT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
SaturateWithBlockingTasks(GetParam());
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
+ ExpectMaxTasksIncreasesTo(2 * kMaxTasks);
// A range of possible number of workers is accepted because of
// crbug.com/757897.
- EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
- kNumWorkersInWorkerPool + 1);
- EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
- 2 * kNumWorkersInWorkerPool);
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- 2 * kNumWorkersInWorkerPool);
+ EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(), kMaxTasks + 1);
+ EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), 2 * kMaxTasks);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), 2 * kMaxTasks);
UnblockTasks();
task_tracker_.FlushForTesting();
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
}
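
As a reading aid for the expectations above, here is a hypothetical task body distilled from what SaturateWithBlockingTasks() posts; it assumes only the ScopedBlockingCall constructor already used elsewhere in this patch, and the real tests wait via WaitWithoutBlockingObserver() rather than Wait():

#include "base/synchronization/waitable_event.h"
#include "base/threading/scoped_blocking_call.h"

// Hypothetical sketch of a task that saturates one worker and reports itself
// as blocked, letting the pool raise max tasks and bring up a replacement.
void BlockOneWorker(base::WaitableEvent* unblock) {
  base::ScopedBlockingCall scoped_blocking_call(base::BlockingType::MAY_BLOCK);
  // The real tests use WaitWithoutBlockingObserver() here to avoid nesting a
  // second blocking-call registration inside this scope.
  unblock->Wait();
}  // Leaving the scope lets max tasks drop back to its original value.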
// Verify that tasks posted in a saturated pool before a ScopedBlockingCall will
// execute after ScopedBlockingCall is instantiated.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) {
- WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent thread_can_block(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
-
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
+ WaitableEvent thread_can_block;
+ WaitableEvent threads_continue;
+
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(
FROM_HERE,
BindOnce(
[](const NestedBlockingType& nested_blocking_type,
WaitableEvent* thread_running, WaitableEvent* thread_can_block,
- WaitableEvent* thread_continue) {
+ WaitableEvent* threads_continue) {
thread_running->Signal();
WaitWithoutBlockingObserver(thread_can_block);
NestedScopedBlockingCall nested_scoped_blocking_call(
nested_blocking_type);
- WaitWithoutBlockingObserver(thread_continue);
+ WaitWithoutBlockingObserver(threads_continue);
},
GetParam(), Unretained(&thread_running),
- Unretained(&thread_can_block), Unretained(&thread_continue)));
+ Unretained(&thread_can_block), Unretained(&threads_continue)));
thread_running.Wait();
}
// All workers should be occupied and the pool should be saturated. Workers
// have not entered ScopedBlockingCall yet.
- EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
-
- WaitableEvent extra_thread_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent extra_threads_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kMaxTasks);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
+
+ WaitableEvent extra_threads_running;
+ WaitableEvent extra_threads_continue;
RepeatingClosure extra_threads_running_barrier = BarrierClosure(
- kNumWorkersInWorkerPool,
- BindOnce(&WaitableEvent::Signal, Unretained(&extra_thread_running)));
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ kMaxTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&extra_threads_running)));
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(FROM_HERE,
BindOnce(
[](Closure* extra_threads_running_barrier,
@@ -1095,63 +1142,55 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) {
// tasks we just posted.
thread_can_block.Signal();
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
+ ExpectMaxTasksIncreasesTo(2 * kMaxTasks);
// Should not block forever.
- extra_thread_running.Wait();
- EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
- 2 * kNumWorkersInWorkerPool);
+ extra_threads_running.Wait();
+ EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), 2 * kMaxTasks);
extra_threads_continue.Signal();
- thread_continue.Signal();
+ threads_continue.Signal();
task_tracker_.FlushForTesting();
}
// Verify that workers become idle when the pool is over-capacity and that
// those workers do no work.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
- ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ ASSERT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
SaturateWithBlockingTasks(GetParam());
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- 2 * kNumWorkersInWorkerPool);
+ ExpectMaxTasksIncreasesTo(2 * kMaxTasks);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), 2 * kMaxTasks);
// A range of possible number of workers is accepted because of
// crbug.com/757897.
- EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
- kNumWorkersInWorkerPool + 1);
- EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
- 2 * kNumWorkersInWorkerPool);
-
- WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
-
- RepeatingClosure thread_running_barrier = BarrierClosure(
- kNumWorkersInWorkerPool,
- BindOnce(&WaitableEvent::Signal, Unretained(&thread_running)));
+ EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(), kMaxTasks + 1);
+ EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), 2 * kMaxTasks);
+
+ WaitableEvent threads_running;
+ WaitableEvent threads_continue;
+
+ RepeatingClosure threads_running_barrier = BarrierClosure(
+ kMaxTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
// Posting these tasks should cause new workers to be created.
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
auto callback = BindOnce(
- [](Closure* thread_running_barrier, WaitableEvent* thread_continue) {
- thread_running_barrier->Run();
- WaitWithoutBlockingObserver(thread_continue);
+ [](Closure* threads_running_barrier, WaitableEvent* threads_continue) {
+ threads_running_barrier->Run();
+ WaitWithoutBlockingObserver(threads_continue);
},
- Unretained(&thread_running_barrier), Unretained(&thread_continue));
+ Unretained(&threads_running_barrier), Unretained(&threads_continue));
task_runner_->PostTask(FROM_HERE, std::move(callback));
}
- thread_running.Wait();
+ threads_running.Wait();
ASSERT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(), 0U);
- EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
- 2 * kNumWorkersInWorkerPool);
+ EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), 2 * kMaxTasks);
AtomicFlag is_exiting;
// These tasks should not get executed until after other tasks become
// unblocked.
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(FROM_HERE, BindOnce(
[](AtomicFlag* is_exiting) {
EXPECT_TRUE(is_exiting->IsSet());
@@ -1159,17 +1198,16 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
Unretained(&is_exiting)));
}
- // The original |kNumWorkersInWorkerPool| will finish their tasks after being
+  // The original |kMaxTasks| workers will finish their tasks after being
// unblocked. There will be work in the work queue, but the pool should now
// be over-capacity and workers will become idle.
UnblockTasks();
- worker_pool_->WaitForWorkersIdleForTesting(kNumWorkersInWorkerPool);
- EXPECT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(),
- kNumWorkersInWorkerPool);
+ worker_pool_->WaitForWorkersIdleForTesting(kMaxTasks);
+ EXPECT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(), kMaxTasks);
  // Posting more tasks should not cause workers that are idle because the
  // pool is over capacity to begin doing work.
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(FROM_HERE, BindOnce(
[](AtomicFlag* is_exiting) {
EXPECT_TRUE(is_exiting->IsSet());
@@ -1183,7 +1221,7 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
is_exiting.Set();
// Unblocks the new workers.
- thread_continue.Signal();
+ threads_continue.Signal();
task_tracker_.FlushForTesting();
}
@@ -1205,54 +1243,45 @@ INSTANTIATE_TEST_CASE_P(
TaskSchedulerWorkerPoolBlockingTest::ParamInfoToString);
// Verify that if a thread enters the scope of a MAY_BLOCK ScopedBlockingCall,
-// but exits the scope before the MayBlockThreshold() is reached, that the
-// worker capacity does not increase.
+// but exits the scope before the MayBlockThreshold() is reached, that the max
+// tasks does not increase.
TEST_F(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockUnblockPremature) {
- ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ ASSERT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
- TimeDelta worker_capacity_change_sleep = GetWorkerCapacityChangeSleepTime();
+ TimeDelta max_tasks_change_sleep = GetMaxTasksChangeSleepTime();
worker_pool_->MaximizeMayBlockThresholdForTesting();
SaturateWithBlockingTasks(NestedBlockingType(BlockingType::MAY_BLOCK,
OptionalBlockingType::NO_BLOCK,
BlockingType::MAY_BLOCK));
- PlatformThread::Sleep(worker_capacity_change_sleep);
- EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ PlatformThread::Sleep(max_tasks_change_sleep);
+ EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kMaxTasks);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
UnblockTasks();
task_tracker_.FlushForTesting();
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
}
-// Verify that if worker capacity is incremented because of a MAY_BLOCK
+// Verify that if max tasks is incremented because of a MAY_BLOCK
// ScopedBlockingCall, it isn't incremented again when there is a nested
// WILL_BLOCK ScopedBlockingCall.
TEST_F(TaskSchedulerWorkerPoolBlockingTest,
MayBlockIncreaseCapacityNestedWillBlock) {
- ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ ASSERT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
auto task_runner =
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- WaitableEvent can_return(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent can_return;
// Saturate the pool so that a MAY_BLOCK ScopedBlockingCall would increment
- // the worker capacity.
- for (size_t i = 0; i < kNumWorkersInWorkerPool - 1; ++i) {
+ // the max tasks.
+ for (size_t i = 0; i < kMaxTasks - 1; ++i) {
task_runner->PostTask(FROM_HERE, BindOnce(&WaitWithoutBlockingObserver,
Unretained(&can_return)));
}
- WaitableEvent can_instantiate_will_block(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent did_instantiate_will_block(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent can_instantiate_will_block;
+ WaitableEvent did_instantiate_will_block;
// Post a task that instantiates a MAY_BLOCK ScopedBlockingCall.
task_runner->PostTask(
@@ -1270,28 +1299,26 @@ TEST_F(TaskSchedulerWorkerPoolBlockingTest,
Unretained(&can_instantiate_will_block),
Unretained(&did_instantiate_will_block), Unretained(&can_return)));
- // After a short delay, worker capacity should be incremented.
- ExpectWorkerCapacityIncreasesTo(kNumWorkersInWorkerPool + 1);
+ // After a short delay, max tasks should be incremented.
+ ExpectMaxTasksIncreasesTo(kMaxTasks + 1);
// Wait until the task instantiates a WILL_BLOCK ScopedBlockingCall.
can_instantiate_will_block.Signal();
did_instantiate_will_block.Wait();
- // Worker capacity shouldn't be incremented again.
- EXPECT_EQ(kNumWorkersInWorkerPool + 1,
- worker_pool_->GetWorkerCapacityForTesting());
+ // Max tasks shouldn't be incremented again.
+ EXPECT_EQ(kMaxTasks + 1, worker_pool_->GetMaxTasksForTesting());
// Tear down.
can_return.Signal();
task_tracker_.FlushForTesting();
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks);
}
// Verify that workers that become idle due to the pool being over capacity will
// eventually cleanup.
-TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
- constexpr size_t kWorkerCapacity = 3;
+TEST(TaskSchedulerWorkerPoolOverCapacityTest, VerifyCleanup) {
+ constexpr size_t kLocalMaxTasks = 3;
TaskTracker task_tracker("Test");
DelayedTaskManager delayed_task_manager;
@@ -1299,28 +1326,23 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
MakeRefCounted<TestSimpleTaskRunner>();
delayed_task_manager.Start(service_thread_task_runner);
SchedulerWorkerPoolImpl worker_pool(
- "OverWorkerCapacityTestWorkerPool", "A", ThreadPriority::NORMAL,
+ "OverCapacityTestWorkerPool", "A", ThreadPriority::NORMAL,
task_tracker.GetTrackedRef(), &delayed_task_manager);
worker_pool.Start(
- SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
- service_thread_task_runner, nullptr,
+ SchedulerWorkerPoolParams(kLocalMaxTasks, kReclaimTimeForCleanupTests),
+ kLocalMaxTasks, service_thread_task_runner, nullptr,
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
scoped_refptr<TaskRunner> task_runner =
worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- WaitableEvent threads_running(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent threads_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent threads_running;
+ WaitableEvent threads_continue;
RepeatingClosure threads_running_barrier = BarrierClosure(
- kWorkerCapacity,
+ kLocalMaxTasks,
BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
- WaitableEvent blocked_call_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
-
+ WaitableEvent blocked_call_continue;
RepeatingClosure closure = BindRepeating(
[](Closure* threads_running_barrier, WaitableEvent* threads_continue,
WaitableEvent* blocked_call_continue) {
@@ -1334,23 +1356,19 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
Unretained(&threads_running_barrier), Unretained(&threads_continue),
Unretained(&blocked_call_continue));
- for (size_t i = 0; i < kWorkerCapacity; ++i)
+ for (size_t i = 0; i < kLocalMaxTasks; ++i)
task_runner->PostTask(FROM_HERE, closure);
threads_running.Wait();
- WaitableEvent extra_threads_running(
- WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent extra_threads_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent extra_threads_running;
+ WaitableEvent extra_threads_continue;
RepeatingClosure extra_threads_running_barrier = BarrierClosure(
- kWorkerCapacity,
+ kLocalMaxTasks,
BindOnce(&WaitableEvent::Signal, Unretained(&extra_threads_running)));
- // These tasks should run on the new threads from increasing worker capacity.
- for (size_t i = 0; i < kWorkerCapacity; ++i) {
+ // These tasks should run on the new threads from increasing max tasks.
+ for (size_t i = 0; i < kLocalMaxTasks; ++i) {
task_runner->PostTask(FROM_HERE,
BindOnce(
[](Closure* extra_threads_running_barrier,
@@ -1364,8 +1382,8 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
}
extra_threads_running.Wait();
- ASSERT_EQ(kWorkerCapacity * 2, worker_pool.NumberOfWorkersForTesting());
- EXPECT_EQ(kWorkerCapacity * 2, worker_pool.GetWorkerCapacityForTesting());
+ ASSERT_EQ(kLocalMaxTasks * 2, worker_pool.NumberOfWorkersForTesting());
+ EXPECT_EQ(kLocalMaxTasks * 2, worker_pool.GetMaxTasksForTesting());
blocked_call_continue.Signal();
extra_threads_continue.Signal();
@@ -1378,8 +1396,8 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
// Note: one worker above capacity will not get cleaned up since it's on the
// top of the idle stack.
- worker_pool.WaitForWorkersCleanedUpForTesting(kWorkerCapacity - 1);
- EXPECT_EQ(kWorkerCapacity + 1, worker_pool.NumberOfWorkersForTesting());
+ worker_pool.WaitForWorkersCleanedUpForTesting(kLocalMaxTasks - 1);
+ EXPECT_EQ(kLocalMaxTasks + 1, worker_pool.NumberOfWorkersForTesting());
threads_continue.Signal();
@@ -1387,65 +1405,55 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
}
// Verify that the maximum number of workers is 256 and that hitting the max
-// leaves the pool in a valid state with regards to worker capacity.
+// leaves the pool in a valid state with regard to max tasks.
TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) {
constexpr size_t kMaxNumberOfWorkers = 256;
constexpr size_t kNumExtraTasks = 10;
- WaitableEvent early_blocking_thread_running(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent early_blocking_threads_running;
RepeatingClosure early_threads_barrier_closure =
BarrierClosure(kMaxNumberOfWorkers,
BindOnce(&WaitableEvent::Signal,
- Unretained(&early_blocking_thread_running)));
+ Unretained(&early_blocking_threads_running)));
- WaitableEvent early_threads_finished(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent early_threads_finished;
RepeatingClosure early_threads_finished_barrier = BarrierClosure(
kMaxNumberOfWorkers,
BindOnce(&WaitableEvent::Signal, Unretained(&early_threads_finished)));
- WaitableEvent early_release_thread_continue(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent early_release_threads_continue;
// Post ScopedBlockingCall tasks to hit the worker cap.
for (size_t i = 0; i < kMaxNumberOfWorkers; ++i) {
task_runner_->PostTask(FROM_HERE,
BindOnce(
[](Closure* early_threads_barrier_closure,
- WaitableEvent* early_release_thread_continue,
+ WaitableEvent* early_release_threads_continue,
Closure* early_threads_finished) {
{
ScopedBlockingCall scoped_blocking_call(
BlockingType::WILL_BLOCK);
early_threads_barrier_closure->Run();
WaitWithoutBlockingObserver(
- early_release_thread_continue);
+ early_release_threads_continue);
}
early_threads_finished->Run();
},
Unretained(&early_threads_barrier_closure),
- Unretained(&early_release_thread_continue),
+ Unretained(&early_release_threads_continue),
Unretained(&early_threads_finished_barrier)));
}
- early_blocking_thread_running.Wait();
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool + kMaxNumberOfWorkers);
+ early_blocking_threads_running.Wait();
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(),
+ kMaxTasks + kMaxNumberOfWorkers);
- WaitableEvent late_release_thread_contine(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent late_release_thread_contine;
+ WaitableEvent late_blocking_threads_running;
- WaitableEvent late_blocking_thread_running(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
RepeatingClosure late_threads_barrier_closure = BarrierClosure(
kNumExtraTasks, BindOnce(&WaitableEvent::Signal,
- Unretained(&late_blocking_thread_running)));
+ Unretained(&late_blocking_threads_running)));
// Posts additional tasks. Note: we should already have |kMaxNumberOfWorkers|
// tasks running. These tasks should not be able to get executed yet as
@@ -1468,20 +1476,18 @@ TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) {
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), kMaxNumberOfWorkers);
- early_release_thread_continue.Signal();
+ early_release_threads_continue.Signal();
early_threads_finished.Wait();
- late_blocking_thread_running.Wait();
+ late_blocking_threads_running.Wait();
- WaitableEvent final_tasks_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent final_tasks_continue(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent final_tasks_running;
+ WaitableEvent final_tasks_continue;
RepeatingClosure final_tasks_running_barrier = BarrierClosure(
- kNumWorkersInWorkerPool,
+ kMaxTasks,
BindOnce(&WaitableEvent::Signal, Unretained(&final_tasks_running)));
// Verify that we are still able to saturate the pool.
- for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+ for (size_t i = 0; i < kMaxTasks; ++i) {
task_runner_->PostTask(
FROM_HERE,
BindOnce(
@@ -1493,51 +1499,185 @@ TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) {
Unretained(&final_tasks_continue)));
}
final_tasks_running.Wait();
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- kNumWorkersInWorkerPool + kNumExtraTasks);
+ EXPECT_EQ(worker_pool_->GetMaxTasksForTesting(), kMaxTasks + kNumExtraTasks);
late_release_thread_contine.Signal();
final_tasks_continue.Signal();
task_tracker_.FlushForTesting();
}
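
This test, like several earlier ones, gates its assertions on BarrierClosure so it only proceeds once all posted tasks have run. A minimal, hypothetical helper distilled from that pattern, using only calls already present in this file (the helper name and signature are made up):

#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_runner.h"

// Hypothetical helper; the tests inline this rather than sharing it.
void PostAndWaitForNTasks(scoped_refptr<base::TaskRunner> task_runner,
                          int num_tasks) {
  base::WaitableEvent all_ran;
  // Copies of the barrier share one counter; it signals |all_ran| once it has
  // been run |num_tasks| times, i.e. once every posted task has run.
  base::RepeatingClosure barrier = base::BarrierClosure(
      num_tasks, base::BindOnce(&base::WaitableEvent::Signal,
                                base::Unretained(&all_ran)));
  for (int i = 0; i < num_tasks; ++i)
    task_runner->PostTask(FROM_HERE, barrier);
  all_ran.Wait();
}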
+// Verify that the maximum number of background tasks that can run concurrently
+// is honored.
+TEST_F(TaskSchedulerWorkerPoolImplStartInBodyTest, MaxBackgroundTasks) {
+ constexpr int kMaxBackgroundTasks = kMaxTasks / 2;
+ worker_pool_->Start(
+ SchedulerWorkerPoolParams(kMaxTasks, base::TimeDelta::Max()),
+ kMaxBackgroundTasks, service_thread_.task_runner(), nullptr,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+ const scoped_refptr<TaskRunner> foreground_runner =
+ worker_pool_->CreateTaskRunnerWithTraits({MayBlock()});
+ const scoped_refptr<TaskRunner> background_runner =
+ worker_pool_->CreateTaskRunnerWithTraits(
+ {TaskPriority::BACKGROUND, MayBlock()});
+
+ // It should be possible to have |kMaxBackgroundTasks|
+ // TaskPriority::BACKGROUND tasks running concurrently.
+ WaitableEvent background_tasks_running;
+ WaitableEvent unblock_background_tasks;
+ RepeatingClosure background_tasks_running_barrier = BarrierClosure(
+ kMaxBackgroundTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&background_tasks_running)));
+
+ for (int i = 0; i < kMaxBackgroundTasks; ++i) {
+ background_runner->PostTask(
+ FROM_HERE, base::BindLambdaForTesting([&]() {
+ background_tasks_running_barrier.Run();
+ WaitWithoutBlockingObserver(&unblock_background_tasks);
+ }));
+ }
+ background_tasks_running.Wait();
+
+  // No additional TaskPriority::BACKGROUND task should be able to run yet.
+ AtomicFlag extra_background_task_can_run;
+ WaitableEvent extra_background_task_running;
+ background_runner->PostTask(
+ FROM_HERE, base::BindLambdaForTesting([&]() {
+ EXPECT_TRUE(extra_background_task_can_run.IsSet());
+ extra_background_task_running.Signal();
+ }));
+
+ // An extra foreground task should be able to run.
+ WaitableEvent foreground_task_running;
+ foreground_runner->PostTask(
+ FROM_HERE, base::BindOnce(&WaitableEvent::Signal,
+ Unretained(&foreground_task_running)));
+ foreground_task_running.Wait();
+
+ // Completion of the TaskPriority::BACKGROUND tasks should allow the extra
+ // TaskPriority::BACKGROUND task to run.
+ extra_background_task_can_run.Set();
+ unblock_background_tasks.Signal();
+ extra_background_task_running.Wait();
+
+ // Tear down.
+ task_tracker_.FlushForTesting();
+}
+
+namespace {
+
+class TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest
+ : public TaskSchedulerWorkerPoolImplTestBase,
+ public testing::TestWithParam<BlockingType> {
+ public:
+ static constexpr int kMaxBackgroundTasks = kMaxTasks / 2;
+
+ TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest() = default;
+
+ void SetUp() override {
+ CreateWorkerPool();
+ worker_pool_->Start(
+ SchedulerWorkerPoolParams(kMaxTasks, base::TimeDelta::Max()),
+ kMaxBackgroundTasks, service_thread_.task_runner(), nullptr,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+ }
+
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(
+ TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest);
+};
+
+} // namespace
+
+TEST_P(TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest,
+ BlockingCallAndMaxBackgroundTasksTest) {
+ const scoped_refptr<TaskRunner> background_runner =
+ worker_pool_->CreateTaskRunnerWithTraits(
+ {TaskPriority::BACKGROUND, MayBlock()});
+
+ // Post |kMaxBackgroundTasks| TaskPriority::BACKGROUND tasks that block in a
+ // ScopedBlockingCall.
+ WaitableEvent blocking_background_tasks_running;
+ WaitableEvent unblock_blocking_background_tasks;
+ RepeatingClosure blocking_background_tasks_running_barrier =
+ BarrierClosure(kMaxBackgroundTasks,
+ BindOnce(&WaitableEvent::Signal,
+ Unretained(&blocking_background_tasks_running)));
+ for (int i = 0; i < kMaxBackgroundTasks; ++i) {
+ background_runner->PostTask(
+ FROM_HERE, base::BindLambdaForTesting([&]() {
+ blocking_background_tasks_running_barrier.Run();
+ ScopedBlockingCall scoped_blocking_call(GetParam());
+ WaitWithoutBlockingObserver(&unblock_blocking_background_tasks);
+ }));
+ }
+ blocking_background_tasks_running.Wait();
+
+ // Post an extra |kMaxBackgroundTasks| TaskPriority::BACKGROUND tasks. They
+ // should be able to run, because the existing TaskPriority::BACKGROUND tasks
+ // are blocked within a ScopedBlockingCall.
+ //
+ // Note: We block the tasks until they have all started running to make sure
+ // that it is possible to run an extra |kMaxBackgroundTasks| concurrently.
+ WaitableEvent background_tasks_running;
+ WaitableEvent unblock_background_tasks;
+ RepeatingClosure background_tasks_running_barrier = BarrierClosure(
+ kMaxBackgroundTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&background_tasks_running)));
+ for (int i = 0; i < kMaxBackgroundTasks; ++i) {
+ background_runner->PostTask(
+ FROM_HERE, base::BindLambdaForTesting([&]() {
+ background_tasks_running_barrier.Run();
+ WaitWithoutBlockingObserver(&unblock_background_tasks);
+ }));
+ }
+ background_tasks_running.Wait();
+
+ // Unblock all tasks and tear down.
+ unblock_blocking_background_tasks.Signal();
+ unblock_background_tasks.Signal();
+ task_tracker_.FlushForTesting();
+}
+
+INSTANTIATE_TEST_CASE_P(
+ MayBlock,
+ TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest,
+ ::testing::Values(BlockingType::MAY_BLOCK));
+INSTANTIATE_TEST_CASE_P(
+ WillBlock,
+ TaskSchedulerWorkerPoolBlockingCallAndMaxBackgroundTasksTest,
+ ::testing::Values(BlockingType::WILL_BLOCK));
+
// Verify that worker detachment doesn't race with worker cleanup, regression
// test for https://crbug.com/810464.
-TEST(TaskSchedulerWorkerPoolTest, RacyCleanup) {
+TEST_F(TaskSchedulerWorkerPoolImplStartInBodyTest, RacyCleanup) {
#if defined(OS_FUCHSIA)
// Fuchsia + QEMU doesn't deal well with *many* threads being
// created/destroyed at once: https://crbug.com/816575.
- constexpr size_t kWorkerCapacity = 16;
+ constexpr size_t kLocalMaxTasks = 16;
#else // defined(OS_FUCHSIA)
- constexpr size_t kWorkerCapacity = 256;
+ constexpr size_t kLocalMaxTasks = 256;
#endif // defined(OS_FUCHSIA)
constexpr TimeDelta kReclaimTimeForRacyCleanupTest =
TimeDelta::FromMilliseconds(10);
- TaskTracker task_tracker("Test");
- DelayedTaskManager delayed_task_manager;
- scoped_refptr<TaskRunner> service_thread_task_runner =
- MakeRefCounted<TestSimpleTaskRunner>();
- delayed_task_manager.Start(service_thread_task_runner);
- SchedulerWorkerPoolImpl worker_pool(
- "RacyCleanupTestWorkerPool", "A", ThreadPriority::NORMAL,
- task_tracker.GetTrackedRef(), &delayed_task_manager);
- worker_pool.Start(SchedulerWorkerPoolParams(kWorkerCapacity,
- kReclaimTimeForRacyCleanupTest),
- service_thread_task_runner, nullptr,
- SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+ worker_pool_->Start(
+ SchedulerWorkerPoolParams(kLocalMaxTasks, kReclaimTimeForRacyCleanupTest),
+ kLocalMaxTasks, service_thread_.task_runner(), nullptr,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
scoped_refptr<TaskRunner> task_runner =
- worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+ worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
- WaitableEvent threads_running(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent unblock_threads(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent threads_running;
+ WaitableEvent unblock_threads;
RepeatingClosure threads_running_barrier = BarrierClosure(
- kWorkerCapacity,
+ kLocalMaxTasks,
BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
- for (size_t i = 0; i < kWorkerCapacity; ++i) {
+ for (size_t i = 0; i < kLocalMaxTasks; ++i) {
task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -1556,10 +1696,11 @@ TEST(TaskSchedulerWorkerPoolTest, RacyCleanup) {
// being idle.
PlatformThread::Sleep(kReclaimTimeForRacyCleanupTest);
- worker_pool.JoinForTesting();
+ worker_pool_->JoinForTesting();
// Unwinding this test will be racy if worker cleanup can race with
// SchedulerWorkerPoolImpl destruction : https://crbug.com/810464.
+ worker_pool_.reset();
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
index db855695444..08b4f7b0d90 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -7,10 +7,10 @@
namespace base {
SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
- int max_threads,
+ int max_tasks,
TimeDelta suggested_reclaim_time,
SchedulerBackwardCompatibility backward_compatibility)
- : max_threads_(max_threads),
+ : max_tasks_(max_tasks),
suggested_reclaim_time_(suggested_reclaim_time),
backward_compatibility_(backward_compatibility) {}
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.h b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
index 928d3b4d426..9ee9472c349 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
@@ -12,13 +12,14 @@ namespace base {
class BASE_EXPORT SchedulerWorkerPoolParams final {
public:
- // Constructs a set of params used to initialize a pool. The pool will contain
- // up to |max_threads|. |suggested_reclaim_time| sets a suggestion on when to
- // reclaim idle threads. The pool is free to ignore this value for performance
- // or correctness reasons. |backward_compatibility| indicates whether backward
- // compatibility is enabled.
+ // Constructs a set of params used to initialize a pool. The pool will run
+ // concurrently at most |max_tasks| that aren't blocked (ScopedBlockingCall).
+ // |suggested_reclaim_time| sets a suggestion on when to reclaim idle threads.
+ // The pool is free to ignore this value for performance or correctness
+ // reasons. |backward_compatibility| indicates whether backward compatibility
+ // is enabled.
SchedulerWorkerPoolParams(
- int max_threads,
+ int max_tasks,
TimeDelta suggested_reclaim_time,
SchedulerBackwardCompatibility backward_compatibility =
SchedulerBackwardCompatibility::DISABLED);
@@ -26,14 +27,14 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
SchedulerWorkerPoolParams(const SchedulerWorkerPoolParams& other);
SchedulerWorkerPoolParams& operator=(const SchedulerWorkerPoolParams& other);
- int max_threads() const { return max_threads_; }
+ int max_tasks() const { return max_tasks_; }
TimeDelta suggested_reclaim_time() const { return suggested_reclaim_time_; }
SchedulerBackwardCompatibility backward_compatibility() const {
return backward_compatibility_;
}
private:
- int max_threads_;
+ int max_tasks_;
TimeDelta suggested_reclaim_time_;
SchedulerBackwardCompatibility backward_compatibility_;
};
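
A minimal sketch of driving the renamed parameter end to end, distilled from the test bodies replaced and added in this diff; it assumes the base::internal test helpers (TaskTracker, DelayedTaskManager, TestSimpleTaskRunner) that those tests already use, and the pool name is made up:

void StartExamplePool() {
  TaskTracker task_tracker("Test");
  DelayedTaskManager delayed_task_manager;
  scoped_refptr<TaskRunner> service_thread_task_runner =
      MakeRefCounted<TestSimpleTaskRunner>();
  delayed_task_manager.Start(service_thread_task_runner);

  SchedulerWorkerPoolImpl worker_pool("ExamplePool", "A",
                                      ThreadPriority::NORMAL,
                                      task_tracker.GetTrackedRef(),
                                      &delayed_task_manager);
  // |max_tasks| caps concurrently running, unblocked tasks; Start() now also
  // takes a cap on concurrent TaskPriority::BACKGROUND tasks.
  constexpr size_t kExampleMaxTasks = 4;
  worker_pool.Start(
      SchedulerWorkerPoolParams(kExampleMaxTasks, TimeDelta::Max()),
      kExampleMaxTasks / 2, service_thread_task_runner, nullptr,
      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
  worker_pool.JoinForTesting();
}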
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
index 717409bc246..029345743be 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
@@ -34,7 +34,9 @@ namespace internal {
namespace {
-constexpr size_t kNumWorkersInWorkerPool = 4;
+constexpr size_t kMaxTasks = 4;
+// By default, tests allow half of the pool to be used by background tasks.
+constexpr size_t kMaxBackgroundTasks = kMaxTasks / 2;
constexpr size_t kNumThreadsPostingTasks = 4;
constexpr size_t kNumTasksPostedPerThread = 150;
@@ -131,9 +133,8 @@ class TaskSchedulerWorkerPoolTest
SchedulerWorkerPoolImpl* scheduler_worker_pool_impl =
static_cast<SchedulerWorkerPoolImpl*>(worker_pool_.get());
scheduler_worker_pool_impl->Start(
- SchedulerWorkerPoolParams(kNumWorkersInWorkerPool,
- TimeDelta::Max()),
- service_thread_.task_runner(), nullptr,
+ SchedulerWorkerPoolParams(kMaxTasks, TimeDelta::Max()),
+ kMaxBackgroundTasks, service_thread_.task_runner(), nullptr,
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
break;
}
@@ -275,8 +276,7 @@ TEST_P(TaskSchedulerWorkerPoolTest, SequencedRunsTasksInCurrentSequence) {
auto sequenced_task_runner =
worker_pool_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -291,10 +291,8 @@ TEST_P(TaskSchedulerWorkerPoolTest, SequencedRunsTasksInCurrentSequence) {
// Verify that tasks posted before Start run after Start.
TEST_P(TaskSchedulerWorkerPoolTest, PostBeforeStart) {
- WaitableEvent task_1_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent task_2_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_1_running;
+ WaitableEvent task_2_running;
scoped_refptr<TaskRunner> task_runner =
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
diff --git a/chromium/base/task_scheduler/scheduler_worker_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index 65f5f002255..1112f55e6cf 100644
--- a/chromium/base/task_scheduler/scheduler_worker_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -16,6 +16,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/scheduler_worker_observer.h"
#include "base/task_scheduler/sequence.h"
@@ -78,11 +79,7 @@ class SchedulerWorkerDefaultDelegate : public SchedulerWorker::Delegate {
class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
protected:
TaskSchedulerWorkerTest()
- : main_entry_called_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- num_get_work_cv_(lock_.CreateConditionVariable()),
- worker_set_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ : num_get_work_cv_(lock_.CreateConditionVariable()) {}
void SetUp() override {
worker_ = MakeRefCounted<SchedulerWorker>(
@@ -189,7 +186,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
BindOnce(&TaskSchedulerWorkerTest::RunTaskCallback,
Unretained(outer_)),
TaskTraits(), TimeDelta());
- EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task));
+ EXPECT_TRUE(outer_->task_tracker_.WillPostTask(&task));
sequence->PushTask(std::move(task));
}
@@ -376,17 +373,7 @@ class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
public:
class Controls : public RefCountedThreadSafe<Controls> {
public:
- Controls()
- : work_running_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::SIGNALED),
- work_processed_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- cleanup_requested_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- destroyed_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- exited_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ Controls() = default;
void HaveWorkBlock() { work_running_.Reset(); }
@@ -419,7 +406,8 @@ class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
friend class RefCountedThreadSafe<Controls>;
~Controls() = default;
- WaitableEvent work_running_;
+ WaitableEvent work_running_{WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::SIGNALED};
WaitableEvent work_processed_;
WaitableEvent cleanup_requested_;
WaitableEvent destroyed_;
@@ -465,7 +453,7 @@ class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
Unretained(&controls_->work_running_)),
{WithBaseSyncPrimitives(), TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
TimeDelta());
- EXPECT_TRUE(task_tracker_->WillPostTask(task));
+ EXPECT_TRUE(task_tracker_->WillPostTask(&task));
sequence->PushTask(std::move(task));
sequence =
task_tracker_->WillScheduleSequence(std::move(sequence), nullptr);
@@ -640,9 +628,7 @@ class CallJoinFromDifferentThread : public SimpleThread {
public:
CallJoinFromDifferentThread(SchedulerWorker* worker_to_join)
: SimpleThread("SchedulerWorkerJoinThread"),
- worker_to_join_(worker_to_join),
- run_started_event_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ worker_to_join_(worker_to_join) {}
~CallJoinFromDifferentThread() override = default;
@@ -746,16 +732,15 @@ class ExpectThreadPriorityDelegate : public SchedulerWorkerDefaultDelegate {
} // namespace
TEST(TaskSchedulerWorkerTest, BumpPriorityOfAliveThreadDuringShutdown) {
+ if (!CanUseBackgroundPriorityForSchedulerWorker())
+ return;
+
TaskTracker task_tracker("Test");
std::unique_ptr<ExpectThreadPriorityDelegate> delegate(
new ExpectThreadPriorityDelegate);
ExpectThreadPriorityDelegate* delegate_raw = delegate.get();
- delegate_raw->SetExpectedThreadPriority(
- PlatformThread::CanIncreaseCurrentThreadPriority()
- ? ThreadPriority::BACKGROUND
- : ThreadPriority::NORMAL);
-
+ delegate_raw->SetExpectedThreadPriority(ThreadPriority::BACKGROUND);
auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::BACKGROUND,
std::move(delegate),
task_tracker.GetTrackedRef());
@@ -831,9 +816,7 @@ namespace {
class CoInitializeDelegate : public SchedulerWorkerDefaultDelegate {
public:
- CoInitializeDelegate()
- : get_work_returned_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
+ CoInitializeDelegate() = default;
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
EXPECT_FALSE(get_work_returned_.IsSignaled());
diff --git a/chromium/base/task_scheduler/service_thread.cc b/chromium/base/task_scheduler/service_thread.cc
index dd01d25de13..12cd3282dd3 100644
--- a/chromium/base/task_scheduler/service_thread.cc
+++ b/chromium/base/task_scheduler/service_thread.cc
@@ -4,28 +4,54 @@
#include "base/task_scheduler/service_thread.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/debug/alias.h"
+#include "base/rand_util.h"
+#include "base/stl_util.h"
#include "base/task_scheduler/post_task.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/task_traits.h"
-#include "base/time/time.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
+namespace {
+
+TimeDelta g_heartbeat_for_testing = TimeDelta();
+
+} // namespace
+
ServiceThread::ServiceThread(const TaskTracker* task_tracker)
: Thread("TaskSchedulerServiceThread"), task_tracker_(task_tracker) {}
+// static
+void ServiceThread::SetHeartbeatIntervalForTesting(TimeDelta heartbeat) {
+ g_heartbeat_for_testing = heartbeat;
+}
+
void ServiceThread::Init() {
  // In unit tests we sometimes do not have a fully functional TaskScheduler
  // environment; do not perform the heartbeat report in that case, since it
  // relies on such an environment.
if (task_tracker_ && TaskScheduler::GetInstance()) {
+// This seemingly causes a power regression on Android; disable it to see
+// whether it is truly at fault: https://crbug.com/848255
+#if !defined(OS_ANDROID)
+ // Compute the histogram every hour (with a slight offset to drift if that
+ // hour tick happens to line up with specific events). Once per hour per
+ // user was deemed sufficient to gather a reliable metric.
+ constexpr TimeDelta kHeartbeat = TimeDelta::FromMinutes(59);
+
heartbeat_latency_timer_.Start(
- FROM_HERE, TimeDelta::FromSeconds(5),
+ FROM_HERE,
+ g_heartbeat_for_testing.is_zero() ? kHeartbeat
+ : g_heartbeat_for_testing,
BindRepeating(&ServiceThread::PerformHeartbeatLatencyReport,
Unretained(this)));
+#endif
}
}
@@ -41,16 +67,26 @@ void ServiceThread::PerformHeartbeatLatencyReport() const {
{TaskPriority::USER_VISIBLE}, {TaskPriority::USER_VISIBLE, MayBlock()},
{TaskPriority::USER_BLOCKING}, {TaskPriority::USER_BLOCKING, MayBlock()}};
- for (auto& traits : kReportedTraits) {
- // Post through the static API to time the full stack. Use a new Now() for
- // every set of traits in case PostTaskWithTraits() itself is slow.
- base::PostTaskWithTraits(
- FROM_HERE, traits,
- BindOnce(&TaskTracker::RecordLatencyHistogram,
- Unretained(task_tracker_),
- TaskTracker::LatencyHistogramType::HEARTBEAT_LATENCY, traits,
- TimeTicks::Now()));
- }
+  // Only record latency for one set of TaskTraits per report, both to avoid
+  // bias from the order in which tasks are posted (were we to record them all
+  // at once) and to avoid spinning up many worker threads to process this
+  // report when the scheduler is otherwise idle (each pool keeps at least one
+  // idle thread, so a single task isn't an issue).
+
+ // Invoke RandInt() out-of-line to ensure it's obtained before
+ // TimeTicks::Now().
+ const TaskTraits& profiled_traits =
+ kReportedTraits[RandInt(0, base::size(kReportedTraits) - 1)];
+
+  // Post through the static API to time the full stack. Use a fresh Now() for
+  // the selected set of traits in case PostTaskWithTraits() itself is slow.
+  // Bonus: this approach also includes the overhead of Bind() in the reported
+  // latency.
+ base::PostTaskWithTraits(
+ FROM_HERE, profiled_traits,
+ BindOnce(&TaskTracker::RecordLatencyHistogram, Unretained(task_tracker_),
+ TaskTracker::LatencyHistogramType::HEARTBEAT_LATENCY,
+ profiled_traits, TimeTicks::Now()));
}
} // namespace internal
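
The "invoke RandInt() out-of-line" comment above guards against C++'s unspecified argument evaluation order. A small, self-contained illustration with made-up names (Probe and MakeProbe() are not scheduler APIs):

#include "base/rand_util.h"
#include "base/time/time.h"

// Hypothetical stand-in for the data captured per heartbeat report.
struct Probe {
  int traits_index;
  base::TimeTicks post_time;
};

Probe MakeProbe() {
  // If RandInt() and TimeTicks::Now() were both arguments of one call, the
  // compiler could evaluate Now() first, and the cost of the random pick would
  // then be charged to the measured latency. Hoisting the pick sequences it
  // before the timestamp.
  const int index = base::RandInt(0, 3);
  return {index, base::TimeTicks::Now()};
}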
diff --git a/chromium/base/task_scheduler/service_thread.h b/chromium/base/task_scheduler/service_thread.h
index f9b23fab22e..caadc1d1c67 100644
--- a/chromium/base/task_scheduler/service_thread.h
+++ b/chromium/base/task_scheduler/service_thread.h
@@ -8,6 +8,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/threading/thread.h"
+#include "base/time/time.h"
#include "base/timer/timer.h"
namespace base {
@@ -29,13 +30,18 @@ class BASE_EXPORT ServiceThread : public Thread {
// ServiceThread.
explicit ServiceThread(const TaskTracker* task_tracker);
+ // Overrides the default interval at which |heartbeat_latency_timer_| fires.
+ // Call this with a |heartbeat| of zero to undo the override.
+ // Must not be called while the ServiceThread is running.
+ static void SetHeartbeatIntervalForTesting(TimeDelta heartbeat);
+
private:
// Thread:
void Init() override;
void Run(RunLoop* run_loop) override;
- // Kicks off async tasks which will record a histogram on the latency of
- // various traits.
+ // Kicks off a single async task which will record a histogram on the latency
+ // of a randomly chosen set of TaskTraits.
void PerformHeartbeatLatencyReport() const;
const TaskTracker* const task_tracker_;
diff --git a/chromium/base/task_scheduler/service_thread_unittest.cc b/chromium/base/task_scheduler/service_thread_unittest.cc
index 9f61f9b8d02..3c4276259bd 100644
--- a/chromium/base/task_scheduler/service_thread_unittest.cc
+++ b/chromium/base/task_scheduler/service_thread_unittest.cc
@@ -10,7 +10,7 @@
#include "base/debug/stack_trace.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_scheduler_impl.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -52,10 +52,20 @@ TEST(TaskSchedulerServiceThreadTest, MAYBE_StackHasIdentifyingFrame) {
service_thread.FlushForTesting();
}
+#if defined(OS_ANDROID)
+// The heartbeat latency report has been temporarily disabled on Android per
+// https://crbug.com/848255.
+#define MAYBE_HeartbeatLatencyReport DISABLED_HeartbeatLatencyReport
+#else
+#define MAYBE_HeartbeatLatencyReport HeartbeatLatencyReport
+#endif
+
// Integration test verifying that a service thread running in a fully
// integrated TaskScheduler environment results in reporting
// HeartbeatLatencyMicroseconds metrics.
-TEST(TaskSchedulerServiceThreadIntegrationTest, HeartbeatLatencyReport) {
+TEST(TaskSchedulerServiceThreadIntegrationTest, MAYBE_HeartbeatLatencyReport) {
+ ServiceThread::SetHeartbeatIntervalForTesting(TimeDelta::FromMilliseconds(1));
+
TaskScheduler::SetInstance(
std::make_unique<internal::TaskSchedulerImpl>("Test"));
TaskScheduler::GetInstance()->StartWithDefaultParams();
@@ -74,14 +84,17 @@ TEST(TaskSchedulerServiceThreadIntegrationTest, HeartbeatLatencyReport) {
"TaskScheduler.HeartbeatLatencyMicroseconds.Test."
"BackgroundTaskPriority_MayBlock"};
- constexpr TimeDelta kReasonableTimeout = TimeDelta::FromSeconds(6);
+ // Each report hits a single histogram above (randomly selected), but with a
+ // 1 ms heartbeat and the 1 second timeout below, ~1000 reports should touch
+ // every histogram at least once the vast majority of the time.
+ constexpr TimeDelta kReasonableTimeout = TimeDelta::FromSeconds(1);
constexpr TimeDelta kBusyWaitTime = TimeDelta::FromMilliseconds(100);
const TimeTicks start_time = TimeTicks::Now();
HistogramTester tester;
for (const char* expected_metric : kExpectedMetrics) {
- for (int i = 0; tester.GetAllSamples(expected_metric).empty(); ++i) {
+ while (tester.GetAllSamples(expected_metric).empty()) {
if (TimeTicks::Now() - start_time > kReasonableTimeout)
LOG(WARNING) << "Waiting a while for " << expected_metric;
PlatformThread::Sleep(kBusyWaitTime);
@@ -90,6 +103,8 @@ TEST(TaskSchedulerServiceThreadIntegrationTest, HeartbeatLatencyReport) {
TaskScheduler::GetInstance()->JoinForTesting();
TaskScheduler::SetInstance(nullptr);
+
+ ServiceThread::SetHeartbeatIntervalForTesting(TimeDelta());
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index cb6d0970d7b..b07ea286311 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -230,7 +230,8 @@ class BASE_EXPORT TaskScheduler {
friend class content::BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
// Returns the maximum number of non-single-threaded non-blocked tasks posted
- // with |traits| that can run concurrently in this TaskScheduler.
+ // with |traits| that can run concurrently in this TaskScheduler. |traits|
+ // can't contain TaskPriority::BACKGROUND.
//
// Do not use this method. To process n items, post n tasks that each process
// 1 item rather than GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated()
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index a5ab06c4fb2..605c1e88ca7 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -4,12 +4,14 @@
#include "base/task_scheduler/task_scheduler_impl.h"
+#include <algorithm>
#include <string>
#include <utility>
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial_params.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/task_scheduler/delayed_task_manager.h"
#include "base/task_scheduler/environment_config.h"
@@ -37,21 +39,42 @@ TaskSchedulerImpl::TaskSchedulerImpl(
&delayed_task_manager_) {
DCHECK(!histogram_label.empty());
- static_assert(arraysize(worker_pools_) == ENVIRONMENT_COUNT,
- "The size of |worker_pools_| must match ENVIRONMENT_COUNT.");
+ static_assert(arraysize(environment_to_worker_pool_) == ENVIRONMENT_COUNT,
+ "The size of |environment_to_worker_pool_| must match "
+ "ENVIRONMENT_COUNT.");
static_assert(
- arraysize(kEnvironmentParams) == ENVIRONMENT_COUNT,
+ size(kEnvironmentParams) == ENVIRONMENT_COUNT,
"The size of |kEnvironmentParams| must match ENVIRONMENT_COUNT.");
- for (int environment_type = 0; environment_type < ENVIRONMENT_COUNT;
+ int num_pools_to_create = CanUseBackgroundPriorityForSchedulerWorker()
+ ? ENVIRONMENT_COUNT
+ : ENVIRONMENT_COUNT_WITHOUT_BACKGROUND_PRIORITY;
+ for (int environment_type = 0; environment_type < num_pools_to_create;
++environment_type) {
- worker_pools_[environment_type] = std::make_unique<SchedulerWorkerPoolImpl>(
+ worker_pools_.emplace_back(std::make_unique<SchedulerWorkerPoolImpl>(
JoinString(
{histogram_label, kEnvironmentParams[environment_type].name_suffix},
"."),
kEnvironmentParams[environment_type].name_suffix,
kEnvironmentParams[environment_type].priority_hint,
- task_tracker_->GetTrackedRef(), &delayed_task_manager_);
+ task_tracker_->GetTrackedRef(), &delayed_task_manager_));
+ }
+
+ // Map environment indexes to pools.
+ environment_to_worker_pool_[FOREGROUND] = worker_pools_[FOREGROUND].get();
+ environment_to_worker_pool_[FOREGROUND_BLOCKING] =
+ worker_pools_[FOREGROUND_BLOCKING].get();
+
+ if (CanUseBackgroundPriorityForSchedulerWorker()) {
+ environment_to_worker_pool_[BACKGROUND] = worker_pools_[BACKGROUND].get();
+ environment_to_worker_pool_[BACKGROUND_BLOCKING] =
+ worker_pools_[BACKGROUND_BLOCKING].get();
+ } else {
+ // On platforms without background thread priority, tasks posted to the
+ // background environment are run by foreground pools.
+ environment_to_worker_pool_[BACKGROUND] = worker_pools_[FOREGROUND].get();
+ environment_to_worker_pool_[BACKGROUND_BLOCKING] =
+ worker_pools_[FOREGROUND_BLOCKING].get();
}
}
@@ -112,20 +135,41 @@ void TaskSchedulerImpl::Start(
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
#endif
- worker_pools_[BACKGROUND]->Start(
- init_params.background_worker_pool_params, service_thread_task_runner,
- scheduler_worker_observer, worker_environment);
- worker_pools_[BACKGROUND_BLOCKING]->Start(
- init_params.background_blocking_worker_pool_params,
- service_thread_task_runner, scheduler_worker_observer,
- worker_environment);
+ // On platforms that can't use the background thread priority, background
+ // tasks run in foreground pools. A cap is set on the number of background
+ // tasks that can run in foreground pools to ensure that there is always room
+ // for incoming foreground tasks and to minimize the performance impact of
+ // background tasks.
+ const int max_background_tasks_in_foreground_pool = std::max(
+ 1, std::min(init_params.background_worker_pool_params.max_tasks(),
+ init_params.foreground_worker_pool_params.max_tasks() / 2));
worker_pools_[FOREGROUND]->Start(
- init_params.foreground_worker_pool_params, service_thread_task_runner,
+ init_params.foreground_worker_pool_params,
+ max_background_tasks_in_foreground_pool, service_thread_task_runner,
scheduler_worker_observer, worker_environment);
+ const int max_background_tasks_in_foreground_blocking_pool = std::max(
+ 1,
+ std::min(
+ init_params.background_blocking_worker_pool_params.max_tasks(),
+ init_params.foreground_blocking_worker_pool_params.max_tasks() / 2));
worker_pools_[FOREGROUND_BLOCKING]->Start(
init_params.foreground_blocking_worker_pool_params,
+ max_background_tasks_in_foreground_blocking_pool,
service_thread_task_runner, scheduler_worker_observer,
worker_environment);
+
+ if (CanUseBackgroundPriorityForSchedulerWorker()) {
+ worker_pools_[BACKGROUND]->Start(
+ init_params.background_worker_pool_params,
+ init_params.background_worker_pool_params.max_tasks(),
+ service_thread_task_runner, scheduler_worker_observer,
+ worker_environment);
+ worker_pools_[BACKGROUND_BLOCKING]->Start(
+ init_params.background_blocking_worker_pool_params,
+ init_params.background_blocking_worker_pool_params.max_tasks(),
+ service_thread_task_runner, scheduler_worker_observer,
+ worker_environment);
+ }
}
void TaskSchedulerImpl::PostDelayedTaskWithTraits(const Location& from_here,
@@ -184,6 +228,9 @@ std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
int TaskSchedulerImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
const TaskTraits& traits) const {
+ // This method does not support getting the maximum number of BACKGROUND tasks
+ // that can run concurrently in a pool.
+ DCHECK_NE(traits.priority(), TaskPriority::BACKGROUND);
return GetWorkerPoolForTraits(traits)
->GetMaxConcurrentNonBlockedTasksDeprecated();
}
@@ -220,7 +267,7 @@ void TaskSchedulerImpl::JoinForTesting() {
SchedulerWorkerPoolImpl* TaskSchedulerImpl::GetWorkerPoolForTraits(
const TaskTraits& traits) const {
- return worker_pools_[GetEnvironmentIndexForTraits(traits)].get();
+ return environment_to_worker_pool_[GetEnvironmentIndexForTraits(traits)];
}
TaskTraits TaskSchedulerImpl::SetUserBlockingPriorityIfNeeded(
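The cap on background tasks allowed into a foreground pool introduced above is simply half of the foreground capacity, clamped to the background pool's own budget and to a minimum of 1. A small sketch of that arithmetic in isolation (the parameter names are illustrative):

  #include <algorithm>

  // Mirrors the clamp used in TaskSchedulerImpl::Start(): never below 1, never
  // above the background pool's own budget, at most half the foreground pool.
  int MaxBackgroundTasksInForegroundPool(int background_max_tasks,
                                         int foreground_max_tasks) {
    return std::max(1, std::min(background_max_tasks, foreground_max_tasks / 2));
  }

  // Examples: (3, 12) -> 3, (8, 4) -> 2, (1, 1) -> 1.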
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index 81a5a871ce8..598079d57b9 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -17,6 +17,7 @@
#include "base/strings/string_piece.h"
#include "base/synchronization/atomic_flag.h"
#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
@@ -109,9 +110,12 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
// TODO(fdoray): Remove after experiment. https://crbug.com/757022
AtomicFlag all_tasks_user_blocking_;
- // There are 4 SchedulerWorkerPoolImpl in this array to match the 4
- // SchedulerWorkerPoolParams in TaskScheduler::InitParams.
- std::unique_ptr<SchedulerWorkerPoolImpl> worker_pools_[4];
+ // Owns all the pools managed by this TaskScheduler.
+ std::vector<std::unique_ptr<SchedulerWorkerPoolImpl>> worker_pools_;
+
+ // Maps an environment from EnvironmentType to a pool in |worker_pools_|.
+ SchedulerWorkerPoolImpl* environment_to_worker_pool_[static_cast<int>(
+ EnvironmentType::ENVIRONMENT_COUNT)];
#if DCHECK_IS_ON()
// Set once JoinForTesting() has returned.
diff --git a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
index 4fe4a25369b..94c5293903e 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -13,18 +13,20 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
+#include "base/cfi_buildflags.h"
#include "base/debug/stack_trace.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_params.h"
-#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/scheduler_worker_observer.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_traits.h"
#include "base/task_scheduler/test_task_factory.h"
#include "base/task_scheduler/test_utils.h"
+#include "base/test/gtest_util.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequence_local_storage_slot.h"
@@ -75,11 +77,7 @@ bool GetIOAllowed() {
// to run a Task with |traits|.
// Note: ExecutionMode is verified inside TestTaskFactory.
void VerifyTaskEnvironment(const TaskTraits& traits) {
- const bool supports_background_priority =
- Lock::HandlesMultipleThreadPriorities() &&
- PlatformThread::CanIncreaseCurrentThreadPriority();
-
- EXPECT_EQ(supports_background_priority &&
+ EXPECT_EQ(CanUseBackgroundPriorityForSchedulerWorker() &&
traits.priority() == TaskPriority::BACKGROUND
? ThreadPriority::BACKGROUND
: ThreadPriority::NORMAL,
@@ -94,10 +92,24 @@ void VerifyTaskEnvironment(const TaskTraits& traits) {
// Verify that the thread the task is running on is named as expected.
const std::string current_thread_name(PlatformThread::GetName());
EXPECT_NE(std::string::npos, current_thread_name.find("TaskScheduler"));
- EXPECT_NE(std::string::npos,
- current_thread_name.find(
- traits.priority() == TaskPriority::BACKGROUND ? "Background"
- : "Foreground"));
+
+ if (current_thread_name.find("SingleThread") != std::string::npos) {
+ // For now, single-threaded background tasks run on their own threads.
+ // TODO(fdoray): Run single-threaded background tasks on foreground workers
+ // on platforms that don't support background thread priority.
+ EXPECT_NE(
+ std::string::npos,
+ current_thread_name.find(traits.priority() == TaskPriority::BACKGROUND
+ ? "Background"
+ : "Foreground"));
+ } else {
+ EXPECT_NE(std::string::npos,
+ current_thread_name.find(
+ CanUseBackgroundPriorityForSchedulerWorker() &&
+ traits.priority() == TaskPriority::BACKGROUND
+ ? "Background"
+ : "Foreground"));
+ }
EXPECT_EQ(traits.may_block(),
current_thread_name.find("Blocking") != std::string::npos);
}
@@ -255,8 +267,7 @@ class TaskSchedulerImplTest
// restrictions. The ExecutionMode parameter is ignored by this test.
TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelay) {
StartTaskScheduler();
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
scheduler_.PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
@@ -271,8 +282,7 @@ TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelay) {
// ignored by this test.
TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelay) {
StartTaskScheduler();
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
scheduler_.PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
@@ -305,8 +315,7 @@ TEST_P(TaskSchedulerImplTest, PostTasksViaTaskRunner) {
// Verifies that a task posted via PostDelayedTaskWithTraits without a delay
// doesn't run before Start() is called.
TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelayBeforeStart) {
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
scheduler_.PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
@@ -327,8 +336,7 @@ TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelayBeforeStart) {
// Verifies that a task posted via PostDelayedTaskWithTraits with a delay
// doesn't run before Start() is called.
TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelayBeforeStart) {
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
scheduler_.PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
@@ -350,8 +358,7 @@ TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelayBeforeStart) {
// Verifies that a task posted via a TaskRunner doesn't run before Start() is
// called.
TEST_P(TaskSchedulerImplTest, PostTaskViaTaskRunnerBeforeStart) {
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
CreateTaskRunnerWithTraitsAndExecutionMode(&scheduler_, GetParam().traits,
GetParam().execution_mode)
->PostTask(FROM_HERE,
@@ -378,8 +385,7 @@ TEST_P(TaskSchedulerImplTest, AllTasksAreUserBlockingTaskRunner) {
EnableAllTasksUserBlocking();
StartTaskScheduler();
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
CreateTaskRunnerWithTraitsAndExecutionMode(&scheduler_, GetParam().traits,
GetParam().execution_mode)
->PostTask(FROM_HERE,
@@ -397,8 +403,7 @@ TEST_P(TaskSchedulerImplTest, AllTasksAreUserBlocking) {
EnableAllTasksUserBlocking();
StartTaskScheduler();
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_running;
// Ignore |params.execution_mode| in this test.
scheduler_.PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
@@ -415,8 +420,7 @@ TEST_P(TaskSchedulerImplTest, AllTasksAreUserBlocking) {
TEST_P(TaskSchedulerImplTest, FlushAsyncForTestingSimple) {
StartTaskScheduler();
- WaitableEvent unblock_task(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent unblock_task;
CreateTaskRunnerWithTraitsAndExecutionMode(
&scheduler_,
TaskTraits::Override(GetParam().traits, {WithBaseSyncPrimitives()}),
@@ -424,8 +428,7 @@ TEST_P(TaskSchedulerImplTest, FlushAsyncForTestingSimple) {
->PostTask(FROM_HERE,
BindOnce(&WaitableEvent::Wait, Unretained(&unblock_task)));
- WaitableEvent flush_event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent flush_event;
scheduler_.FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&flush_event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
@@ -463,10 +466,18 @@ TEST_F(TaskSchedulerImplTest, MultipleTraitsExecutionModePairs) {
TEST_F(TaskSchedulerImplTest,
GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated) {
StartTaskScheduler();
- EXPECT_EQ(1, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {TaskPriority::BACKGROUND}));
- EXPECT_EQ(3, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {MayBlock(), TaskPriority::BACKGROUND}));
+
+ // GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated() does not support
+ // TaskPriority::BACKGROUND.
+ EXPECT_DCHECK_DEATH({
+ scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {TaskPriority::BACKGROUND});
+ });
+ EXPECT_DCHECK_DEATH({
+ scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {MayBlock(), TaskPriority::BACKGROUND});
+ });
+
EXPECT_EQ(4, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
{TaskPriority::USER_VISIBLE}));
EXPECT_EQ(12, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
@@ -487,8 +498,7 @@ TEST_F(TaskSchedulerImplTest, SequencedRunsTasksInCurrentSequence) {
auto sequenced_task_runner =
scheduler_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
single_thread_task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -512,8 +522,7 @@ TEST_F(TaskSchedulerImplTest, SingleThreadRunsTasksInCurrentSequence) {
scheduler_.CreateSingleThreadTaskRunnerWithTraits(
TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
sequenced_task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -533,8 +542,7 @@ TEST_F(TaskSchedulerImplTest, COMSTATaskRunnersRunWithCOMSTA) {
auto com_sta_task_runner = scheduler_.CreateCOMSTATaskRunnerWithTraits(
TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_ran;
com_sta_task_runner->PostTask(
FROM_HERE, Bind(
[](WaitableEvent* task_ran) {
@@ -675,6 +683,11 @@ void VerifyHasStringOnStack(const std::string& query) {
// Many POSIX bots flakily crash on |debug::StackTrace().ToString()|,
// https://crbug.com/840429.
#define MAYBE_IdentifiableStacks DISABLED_IdentifiableStacks
+#elif defined(OS_WIN) && \
+ (defined(ADDRESS_SANITIZER) || BUILDFLAG(CFI_CAST_CHECK))
+// Hangs on WinASan and WinCFI (grabbing StackTrace() too slow?),
+// https://crbug.com/845010#c7.
+#define MAYBE_IdentifiableStacks DISABLED_IdentifiableStacks
#else
#define MAYBE_IdentifiableStacks IdentifiableStacks
#endif
@@ -746,13 +759,18 @@ TEST_F(TaskSchedulerImplTest, SchedulerWorkerObserver) {
testing::StrictMock<test::MockSchedulerWorkerObserver> observer;
set_scheduler_worker_observer(&observer);
-// 4 workers should be created for the 4 pools. After that, 8 threads should
-// be created for single-threaded work (16 on Windows).
+ // A worker should be created for each pool. After that, 8 threads should be
+ // created for single-threaded work (16 on Windows).
+ const int kExpectedNumPoolWorkers =
+ CanUseBackgroundPriorityForSchedulerWorker() ? 4 : 2;
#if defined(OS_WIN)
- constexpr int kExpectedNumWorkers = 20;
+ const int kExpectedNumSingleThreadedWorkers = 16;
#else
- constexpr int kExpectedNumWorkers = 12;
+ const int kExpectedNumSingleThreadedWorkers = 8;
#endif
+ const int kExpectedNumWorkers =
+ kExpectedNumPoolWorkers + kExpectedNumSingleThreadedWorkers;
+
EXPECT_CALL(observer, OnSchedulerWorkerMainEntry())
.Times(kExpectedNumWorkers);
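The expected worker count in the observer expectation above is now derived instead of hard-coded: 2 or 4 pool workers depending on background-priority support, plus the single-threaded workers (8, or 16 on Windows). The same arithmetic as a standalone sketch:

  // Returns the number of SchedulerWorkers the observer test expects to see.
  int ExpectedSchedulerWorkers(bool can_use_background_priority, bool is_win) {
    const int pool_workers = can_use_background_priority ? 4 : 2;
    const int single_threaded_workers = is_win ? 16 : 8;
    // 20 or 18 on Windows, 12 or 10 elsewhere.
    return pool_workers + single_threaded_workers;
  }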
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index ab46b9edcec..e4955fd7291 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -338,23 +338,23 @@ void TaskTracker::FlushAsyncForTesting(OnceClosure flush_callback) {
}
}
-bool TaskTracker::WillPostTask(const Task& task) {
- DCHECK(task.task);
+bool TaskTracker::WillPostTask(Task* task) {
+ DCHECK(task->task);
- if (!BeforePostTask(task.traits.shutdown_behavior()))
+ if (!BeforePostTask(task->traits.shutdown_behavior()))
return false;
- if (task.delayed_run_time.is_null())
+ if (task->delayed_run_time.is_null())
subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
{
TRACE_EVENT_WITH_FLOW0(
kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
- TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(task)),
+ TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(*task)),
TRACE_EVENT_FLAG_FLOW_OUT);
}
- task_annotator_.DidQueueTask(nullptr, task);
+ task_annotator_.WillQueueTask(nullptr, task);
return true;
}
@@ -439,9 +439,7 @@ void TaskTracker::SetHasShutdownStartedForTesting() {
// Create a dummy |shutdown_event_| to satisfy TaskTracker's expectation of
// its existence during shutdown (e.g. in OnBlockingShutdownTasksComplete()).
- shutdown_event_.reset(
- new WaitableEvent(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED));
+ shutdown_event_ = std::make_unique<WaitableEvent>();
state_->StartShutdown();
}
@@ -454,7 +452,7 @@ void TaskTracker::RecordLatencyHistogram(
DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
- auto& histograms =
+ const auto& histograms =
latency_histogram_type == LatencyHistogramType::TASK_LATENCY
? task_latency_histograms_
: heartbeat_latency_histograms_;
@@ -550,9 +548,7 @@ void TaskTracker::PerformShutdown() {
DCHECK(!num_block_shutdown_tasks_posted_during_shutdown_);
DCHECK(!state_->HasShutdownStarted());
- shutdown_event_.reset(
- new WaitableEvent(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED));
+ shutdown_event_ = std::make_unique<WaitableEvent>();
const bool tasks_are_blocking_shutdown = state_->StartShutdown();
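WillPostTask() now takes the Task by pointer so the tracker can annotate it (queueing metadata) before it is handed to a sequence, which is why every call site in the tests below switches from passing the task to passing its address. A hedged sketch of the updated call-site pattern, not the scheduler's real posting path:

  #include <utility>

  #include "base/task_scheduler/task.h"
  #include "base/task_scheduler/task_tracker.h"

  // Returns true if |task| was accepted; |task| may be mutated by the tracker.
  bool TryPost(base::internal::TaskTracker* tracker, base::internal::Task task) {
    if (!tracker->WillPostTask(&task))
      return false;  // Refused, e.g. because shutdown has completed.
    // ... push std::move(task) onto a Sequence and schedule it ...
    return true;
  }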
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index 760a8f710eb..3596eca7c49 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -85,6 +85,11 @@ namespace internal {
// Note: A background task is a task posted with TaskPriority::BACKGROUND. A
// foreground task is a task posted with TaskPriority::USER_VISIBLE or
// TaskPriority::USER_BLOCKING.
+//
+// TODO(fdoray): We want to allow disabling TaskPriority::BACKGROUND tasks in a
+// scope (e.g. during startup or page load), but we don't need a dynamic maximum
+// number of background tasks. The code could probably be simplified if it
+// didn't support that. https://crbug.com/831835
class BASE_EXPORT TaskTracker {
public:
// |histogram_label| is used as a suffix for histograms, it must not be empty.
@@ -123,7 +128,8 @@ class BASE_EXPORT TaskTracker {
// Informs this TaskTracker that |task| is about to be posted. Returns true if
// this operation is allowed (|task| should be posted if-and-only-if it is).
- bool WillPostTask(const Task& task);
+ // This method may also modify metadata on |task| if desired.
+ bool WillPostTask(Task* task);
// Informs this TaskTracker that |sequence| is about to be scheduled. If this
// returns |sequence|, it is expected that RunAndPopNextTask() will soon be
diff --git a/chromium/base/task_scheduler/task_tracker_posix_unittest.cc b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
index d8849defa9a..3ca753386bf 100644
--- a/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
@@ -58,7 +58,7 @@ TEST_F(TaskSchedulerTaskTrackerPosixTest, RunTask) {
Bind([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
auto sequence = test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
@@ -81,7 +81,7 @@ TEST_F(TaskSchedulerTaskTrackerPosixTest, FileDescriptorWatcher) {
// FileDescriptorWatcher::WatchReadable needs a SequencedTaskRunnerHandle.
task.sequenced_task_runner_ref = MakeRefCounted<NullTaskRunner>();
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
auto sequence = test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index ea8a3c1c09a..159c9a9674b 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -30,7 +30,7 @@
#include "base/task_scheduler/task_traits.h"
#include "base/task_scheduler/test_utils.h"
#include "base/test/gtest_util.h"
-#include "base/test/histogram_tester.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/test/test_simple_task_runner.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
@@ -120,7 +120,7 @@ class ThreadPostingAndRunningTask : public SimpleThread {
void Run() override {
bool post_succeeded = true;
if (action_ == Action::WILL_POST || action_ == Action::WILL_POST_AND_RUN) {
- post_succeeded = tracker_->WillPostTask(*task_);
+ post_succeeded = tracker_->WillPostTask(task_);
EXPECT_EQ(expect_post_succeeds_, post_succeeded);
}
if (post_succeeded &&
@@ -283,7 +283,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
Task task(CreateTask(GetParam()));
// Inform |task_tracker_| that |task| will be posted.
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
// Run the task.
EXPECT_EQ(0U, NumTasksExecuted());
@@ -313,7 +313,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
TaskTraits(WithBaseSyncPrimitives(), GetParam()), TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
- EXPECT_TRUE(tracker_.WillPostTask(blocked_task));
+ EXPECT_TRUE(tracker_.WillPostTask(&blocked_task));
// Create a thread to run the task. Wait until the task starts running.
ThreadPostingAndRunningTask thread_running_task(
@@ -345,12 +345,12 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
// Inform |task_tracker_| that a task will be posted.
Task task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+ EXPECT_TRUE(tracker_.WillPostTask(&block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -374,7 +374,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
// Inform |task_tracker_| that a task will be posted.
Task task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -404,7 +404,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+ EXPECT_TRUE(tracker_.WillPostTask(&block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -413,7 +413,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted.
Task task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
// Run the BLOCK_SHUTDOWN task.
EXPECT_EQ(0U, NumTasksExecuted());
@@ -422,7 +422,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
} else {
// It shouldn't be allowed to post a non BLOCK_SHUTDOWN task.
Task task(CreateTask(GetParam()));
- EXPECT_FALSE(tracker_.WillPostTask(task));
+ EXPECT_FALSE(tracker_.WillPostTask(&task));
// Don't try to run the task, because it wasn't allowed to be posted.
}
@@ -441,7 +441,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAfterShutdown) {
Task task(CreateTask(GetParam()));
// |task_tracker_| shouldn't allow a task to be posted after shutdown.
- EXPECT_FALSE(tracker_.WillPostTask(task));
+ EXPECT_FALSE(tracker_.WillPostTask(&task));
}
// Verify that BLOCK_SHUTDOWN and SKIP_ON_SHUTDOWN tasks can
@@ -452,7 +452,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
Task task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
TaskTraits(GetParam()), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task));
+ EXPECT_TRUE(tracker_.WillPostTask(&task));
// Set the singleton allowed bit to the opposite of what it is expected to be
// when |tracker| runs |task| to verify that |tracker| actually sets the
@@ -477,7 +477,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
AssertBlockingAllowed();
}),
TaskTraits(MayBlock(), GetParam()), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_with_may_block));
+ EXPECT_TRUE(tracker_.WillPostTask(&task_with_may_block));
DispatchAndRunTaskWithTracker(std::move(task_with_may_block));
// Set the IO allowed bit. Expect TaskTracker to unset it before running a
@@ -487,14 +487,14 @@ TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
FROM_HERE,
Bind([]() { EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); }); }),
TaskTraits(GetParam()), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_without_may_block));
+ EXPECT_TRUE(tracker_.WillPostTask(&task_without_may_block));
DispatchAndRunTaskWithTracker(std::move(task_without_may_block));
}
static void RunTaskRunnerHandleVerificationTask(TaskTracker* tracker,
Task verify_task) {
// Pretend |verify_task| is posted to respect TaskTracker's contract.
- EXPECT_TRUE(tracker->WillPostTask(verify_task));
+ EXPECT_TRUE(tracker->WillPostTask(&verify_task));
// Confirm that the test conditions are right (no TaskRunnerHandles set
// already).
@@ -576,17 +576,17 @@ TEST_P(TaskSchedulerTaskTrackerTest,
}
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingDelayedTask) {
- const Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
- TimeDelta::FromDays(1));
- tracker_.WillPostTask(delayed_task);
+ Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+ TimeDelta::FromDays(1));
+ tracker_.WillPostTask(&delayed_task);
// FlushForTesting() should return even if the delayed task didn't run.
tracker_.FlushForTesting();
}
TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingDelayedTask) {
- const Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
- TimeDelta::FromDays(1));
- tracker_.WillPostTask(delayed_task);
+ Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+ TimeDelta::FromDays(1));
+ tracker_.WillPostTask(&delayed_task);
// FlushAsyncForTesting() should callback even if the delayed task didn't run.
bool called_back = false;
tracker_.FlushAsyncForTesting(
@@ -598,7 +598,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingDelayedTask) {
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -613,11 +613,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingUndelayedTask) {
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent event;
tracker_.FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
@@ -631,7 +630,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingUndelayedTask) {
TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -641,7 +640,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
// Simulate posting another undelayed task.
Task other_undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(other_undelayed_task);
+ tracker_.WillPostTask(&other_undelayed_task);
// Run the first undelayed task.
DispatchAndRunTaskWithTracker(std::move(undelayed_task));
@@ -658,11 +657,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent event;
tracker_.FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
@@ -671,7 +669,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
// Simulate posting another undelayed task.
Task other_undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(other_undelayed_task);
+ tracker_.WillPostTask(&other_undelayed_task);
// Run the first undelayed task.
DispatchAndRunTaskWithTracker(std::move(undelayed_task));
@@ -691,10 +689,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
// Simulate posting a delayed and an undelayed task.
Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta::FromDays(1));
- tracker_.WillPostTask(delayed_task);
+ tracker_.WillPostTask(&delayed_task);
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -720,14 +718,13 @@ TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlushAsyncForTesting) {
// Simulate posting a delayed and an undelayed task.
Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta::FromDays(1));
- tracker_.WillPostTask(delayed_task);
+ tracker_.WillPostTask(&delayed_task);
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent event;
tracker_.FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
@@ -755,7 +752,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdown) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// Shutdown() should return immediately since there are no pending
// BLOCK_SHUTDOWN tasks.
@@ -773,7 +770,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdownAsync) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// Shutdown() should return immediately since there are no pending
// BLOCK_SHUTDOWN tasks.
@@ -795,7 +792,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlush) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushForTesting() shouldn't return before the undelayed task runs or
// shutdown completes.
@@ -818,12 +815,11 @@ TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlushAsyncForTesting) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs or
// shutdown completes.
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent event;
tracker_.FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
@@ -841,7 +837,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlushAsyncForTesting) {
TEST_P(TaskSchedulerTaskTrackerTest, DoublePendingFlushAsyncForTestingFails) {
Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
TimeDelta());
- tracker_.WillPostTask(undelayed_task);
+ tracker_.WillPostTask(&undelayed_task);
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
bool called_back = false;
@@ -881,7 +877,7 @@ TEST_F(TaskSchedulerTaskTrackerTest, CurrentSequenceToken) {
const SequenceToken sequence_token = sequence->token();
Task task(FROM_HERE, Bind(&ExpectSequenceToken, sequence_token), TaskTraits(),
TimeDelta());
- tracker_.WillPostTask(task);
+ tracker_.WillPostTask(&task);
sequence->PushTask(std::move(task));
@@ -995,7 +991,7 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+ EXPECT_TRUE(tracker_.WillPostTask(&block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -1040,9 +1036,9 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
TEST_F(TaskSchedulerTaskTrackerTest,
RunAndPopNextTaskReturnsSequenceToReschedule) {
Task task_1(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_1));
+ EXPECT_TRUE(tracker_.WillPostTask(&task_1));
Task task_2(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_2));
+ EXPECT_TRUE(tracker_.WillPostTask(&task_2));
scoped_refptr<Sequence> sequence =
test::CreateSequenceWithTask(std::move(task_1));
@@ -1069,7 +1065,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
for (int i = 0; i < kMaxNumScheduledBackgroundSequences; ++i) {
Task task(FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BACKGROUND),
TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task));
+ EXPECT_TRUE(tracker.WillPostTask(&task));
scoped_refptr<Sequence> sequence =
test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence,
@@ -1092,7 +1088,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
BindOnce([](bool* extra_task_did_run) { *extra_task_did_run = true; },
Unretained(extra_tasks_did_run.back().get())),
TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(extra_task));
+ EXPECT_TRUE(tracker.WillPostTask(&extra_task));
extra_sequences.push_back(
test::CreateSequenceWithTask(std::move(extra_task)));
extra_observers.push_back(
@@ -1148,7 +1144,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
bool task_a_1_did_run = false;
Task task_a_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_1_did_run)),
TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_a_1));
+ EXPECT_TRUE(tracker.WillPostTask(&task_a_1));
scoped_refptr<Sequence> sequence_a =
test::CreateSequenceWithTask(std::move(task_a_1));
EXPECT_EQ(sequence_a,
@@ -1160,7 +1156,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
bool task_b_1_did_run = false;
Task task_b_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_b_1_did_run)),
TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_b_1));
+ EXPECT_TRUE(tracker.WillPostTask(&task_b_1));
scoped_refptr<Sequence> sequence_b =
test::CreateSequenceWithTask(std::move(task_b_1));
testing::StrictMock<MockCanScheduleSequenceObserver> task_b_1_observer;
@@ -1174,7 +1170,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
bool task_a_2_did_run = false;
Task task_a_2(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_2_did_run)),
TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_a_2));
+ EXPECT_TRUE(tracker.WillPostTask(&task_a_2));
sequence_a->PushTask(std::move(task_a_2));
// Run the first task in |sequence_a|. RunAndPopNextTask() should return
@@ -1217,7 +1213,7 @@ TEST_F(TaskSchedulerTaskTrackerTest,
TaskTraits(TaskPriority::BACKGROUND,
TaskShutdownBehavior::BLOCK_SHUTDOWN),
TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task));
+ EXPECT_TRUE(tracker.WillPostTask(&task));
scoped_refptr<Sequence> sequence =
test::CreateSequenceWithTask(std::move(task));
EXPECT_FALSE(tracker.WillScheduleSequence(sequence, &observer));
@@ -1260,7 +1256,7 @@ class WaitAllowedTestThread : public SimpleThread {
EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
}),
TaskTraits(), TimeDelta());
- EXPECT_TRUE(task_tracker->WillPostTask(task_without_sync_primitives));
+ EXPECT_TRUE(task_tracker->WillPostTask(&task_without_sync_primitives));
testing::StrictMock<MockCanScheduleSequenceObserver>
never_notified_observer;
auto sequence_without_sync_primitives = task_tracker->WillScheduleSequence(
@@ -1279,7 +1275,7 @@ class WaitAllowedTestThread : public SimpleThread {
internal::AssertBaseSyncPrimitivesAllowed();
}),
TaskTraits(WithBaseSyncPrimitives()), TimeDelta());
- EXPECT_TRUE(task_tracker->WillPostTask(task_with_sync_primitives));
+ EXPECT_TRUE(task_tracker->WillPostTask(&task_with_sync_primitives));
auto sequence_with_sync_primitives = task_tracker->WillScheduleSequence(
test::CreateSequenceWithTask(std::move(task_with_sync_primitives)),
&never_notified_observer);
@@ -1351,7 +1347,7 @@ TEST(TaskSchedulerTaskTrackerHistogramTest, TaskLatency) {
for (const auto& test : kTests) {
Task task(FROM_HERE, DoNothing(), test.traits, TimeDelta());
- ASSERT_TRUE(tracker.WillPostTask(task));
+ ASSERT_TRUE(tracker.WillPostTask(&task));
HistogramTester tester;
diff --git a/chromium/base/task_scheduler/task_traits.h b/chromium/base/task_scheduler/task_traits.h
index a4a41fe724a..b0d2242576c 100644
--- a/chromium/base/task_scheduler/task_traits.h
+++ b/chromium/base/task_scheduler/task_traits.h
@@ -24,7 +24,12 @@ enum class TaskPriority {
// This will always be equal to the lowest priority available.
LOWEST = 0,
// User won't notice if this task takes an arbitrarily long time to complete.
+ // TODO(gab): Eliminate BACKGROUND in favor of BEST_EFFORT.
BACKGROUND = LOWEST,
+ // This task will only be scheduled when machine resources are available. Once
+ // running, it may be descheduled if higher-priority work arrives (in this
+ // process or another) and it's running on a non-critical thread.
+ BEST_EFFORT = BACKGROUND,
// This task affects UI or responsiveness of future user interactions. It is
// not an immediate response to a user interaction.
// Examples:
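Since BEST_EFFORT is introduced here as an alias of BACKGROUND, call sites can adopt the new name immediately; both spellings select the same priority. An illustrative posting call (PruneCaches is a placeholder):

  #include "base/bind.h"
  #include "base/location.h"
  #include "base/task_scheduler/post_task.h"
  #include "base/task_scheduler/task_traits.h"

  void PruneCaches();  // Placeholder for deferrable, best-effort work.

  void PostBestEffortWork() {
    // TaskPriority::BEST_EFFORT == TaskPriority::BACKGROUND, so this schedules
    // exactly like a BACKGROUND task today.
    base::PostTaskWithTraits(
        FROM_HERE, {base::TaskPriority::BEST_EFFORT, base::MayBlock()},
        base::BindOnce(&PruneCaches));
  }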
diff --git a/chromium/base/task_scheduler/tracked_ref.h b/chromium/base/task_scheduler/tracked_ref.h
index d99a3455a49..4c68622e343 100644
--- a/chromium/base/task_scheduler/tracked_ref.h
+++ b/chromium/base/task_scheduler/tracked_ref.h
@@ -126,9 +126,7 @@ class TrackedRefFactory {
~TrackedRefFactory() {
// Enter the destruction phase.
- ready_to_destroy_ = std::make_unique<WaitableEvent>(
- WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ ready_to_destroy_ = std::make_unique<WaitableEvent>();
// Release self-ref (if this was the last one it will signal the event right
// away).
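The WaitableEvent simplification here (and throughout the test changes above) relies on the default constructor now producing a MANUAL-reset, NOT_SIGNALED event, so the two forms below are equivalent (before/after sketch):

  #include "base/synchronization/waitable_event.h"

  // Before: reset policy and initial state spelled out explicitly.
  base::WaitableEvent verbose_event(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);

  // After: the default constructor yields the same MANUAL / NOT_SIGNALED event.
  base::WaitableEvent event;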
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 576729c522e..a90c42c20f1 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -26,6 +26,19 @@ static_library("test_config") {
static_library("test_support") {
testonly = true
sources = [
+ "../task/sequence_manager/test/fake_task.cc",
+ "../task/sequence_manager/test/fake_task.h",
+ "../task/sequence_manager/test/lazy_thread_controller_for_test.cc",
+ "../task/sequence_manager/test/lazy_thread_controller_for_test.h",
+ "../task/sequence_manager/test/mock_time_domain.cc",
+ "../task/sequence_manager/test/mock_time_domain.h",
+ "../task/sequence_manager/test/sequence_manager_for_test.cc",
+ "../task/sequence_manager/test/sequence_manager_for_test.h",
+ "../task/sequence_manager/test/test_task_queue.cc",
+ "../task/sequence_manager/test/test_task_queue.h",
+ "../task/sequence_manager/test/test_task_time_observer.h",
+ "../timer/mock_timer.cc",
+ "../timer/mock_timer.h",
"../trace_event/trace_config_memory_test_util.h",
"android/java_handler_thread_helpers.cc",
"android/java_handler_thread_helpers.h",
@@ -41,8 +54,6 @@ static_library("test_support") {
"gtest_xml_unittest_result_printer.h",
"gtest_xml_util.cc",
"gtest_xml_util.h",
- "histogram_tester.cc",
- "histogram_tester.h",
"icu_test_util.cc",
"icu_test_util.h",
"ios/wait_util.h",
@@ -51,6 +62,12 @@ static_library("test_support") {
"launcher/test_result.h",
"launcher/test_results_tracker.h",
"launcher/unit_test_launcher.h",
+ "metrics/histogram_enum_reader.cc",
+ "metrics/histogram_enum_reader.h",
+ "metrics/histogram_tester.cc",
+ "metrics/histogram_tester.h",
+ "metrics/user_action_tester.cc",
+ "metrics/user_action_tester.h",
"mock_callback.h",
"mock_chrome_application_mac.h",
"mock_chrome_application_mac.mm",
@@ -133,8 +150,6 @@ static_library("test_support") {
"trace_event_analyzer.h",
"trace_to_file.cc",
"trace_to_file.h",
- "user_action_tester.cc",
- "user_action_tester.h",
"values_test_util.cc",
"values_test_util.h",
]
@@ -183,6 +198,10 @@ static_library("test_support") {
]
}
+ if (is_fuchsia) {
+ deps += [ "//third_party/fuchsia-sdk:zx" ]
+ }
+
if (is_linux) {
public_deps += [ ":fontconfig_util_linux" ]
data_deps = [
@@ -227,6 +246,8 @@ static_library("test_support") {
"gtest_xml_util.h",
"icu_test_util.cc",
"icu_test_util.h",
+ "metrics/histogram_enum_reader.cc",
+ "metrics/histogram_enum_reader.h",
"perf_test_suite.cc",
"perf_test_suite.h",
"scoped_path_override.cc",
@@ -347,7 +368,6 @@ if (is_linux) {
deps = [
":fontconfig_util_linux",
"//base",
- "//build/config:exe_and_shlib_deps",
]
}
@@ -371,7 +391,6 @@ if (is_linux) {
]
deps = [
"//base",
- "//build/config:exe_and_shlib_deps",
]
}
}
@@ -437,7 +456,4 @@ executable("test_child_process") {
sources = [
"test_child_process.cc",
]
- deps = [
- "//build/config:exe_and_shlib_deps",
- ]
}
diff --git a/chromium/base/test/fontconfig_util_linux.cc b/chromium/base/test/fontconfig_util_linux.cc
new file mode 100644
index 00000000000..6848893f2f1
--- /dev/null
+++ b/chromium/base/test/fontconfig_util_linux.cc
@@ -0,0 +1,423 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/fontconfig_util_linux.h"
+
+#include <fontconfig/fontconfig.h>
+
+#include "base/base_paths.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+const char kFontsConfTemplate[] = R"(<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+<fontconfig>
+
+ <!-- Cache location. -->
+ <cachedir>$1</cachedir>
+
+ <!-- GCS-synced fonts. -->
+ <dir>$2</dir>
+
+ <!-- Default properties. -->
+ <match target="font">
+ <edit name="embeddedbitmap" mode="append_last">
+ <bool>false</bool>
+ </edit>
+ </match>
+
+ <!-- TODO(thomasanderson): Figure out why this is necessary. -->
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>Tinos</string>
+ </test>
+ <test name="prgname" compare="eq">
+ <string>chromevox_tests</string>
+ </test>
+ <edit name="hintstyle" mode="assign">
+ <const>hintslight</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Times</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>sans</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>DejaVu Sans</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>sans serif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ </match>
+
+ <!-- Some layout tests specify Helvetica as a family and we need to make sure
+ that we don't fall back to Tinos for them -->
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Helvetica</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>sans-serif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>serif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>mono</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Cousine</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>monospace</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Cousine</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Courier</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Cousine</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>cursive</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Comic Sans MS</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>fantasy</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Impact</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Monaco</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Arial</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Courier New</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Cousine</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Georgia</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Gelasio</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Times New Roman</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test qual="any" name="family">
+ <string>Verdana</string>
+ </test>
+ <!-- NOT metrically compatible! -->
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ </match>
+
+ <!-- TODO(thomasanderson): Move these configs to be test-specific. -->
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>NonAntiAliasedSans</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="antialias" mode="assign">
+ <bool>false</bool>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>SlightHintedGeorgia</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Gelasio</string>
+ </edit>
+ <edit name="hintstyle" mode="assign">
+ <const>hintslight</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>NonHintedSans</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <!-- These deliberately contradict each other. The 'hinting' preference
+ should take priority -->
+ <edit name="hintstyle" mode="assign">
+ <const>hintfull</const>
+ </edit>
+ <edit name="hinting" mode="assign">
+ <bool>false</bool>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>AutohintedSerif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="autohint" mode="assign">
+ <bool>true</bool>
+ </edit>
+ <edit name="hintstyle" mode="assign">
+ <const>hintmedium</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>HintedSerif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="autohint" mode="assign">
+ <bool>false</bool>
+ </edit>
+ <edit name="hintstyle" mode="assign">
+ <const>hintmedium</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>FullAndAutoHintedSerif</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="autohint" mode="assign">
+ <bool>true</bool>
+ </edit>
+ <edit name="hintstyle" mode="assign">
+ <const>hintfull</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>SubpixelEnabledArial</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="rgba" mode="assign">
+ <const>rgb</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>SubpixelDisabledArial</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Arimo</string>
+ </edit>
+ <edit name="rgba" mode="assign">
+ <const>none</const>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <!-- FontConfig doesn't currently provide a well-defined way to turn on
+ subpixel positioning. This is just an arbitrary pattern to use after
+ turning subpixel positioning on globally to ensure that we don't have
+ issues with our style getting cached for other tests. -->
+ <test name="family" compare="eq">
+ <string>SubpixelPositioning</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <!-- See comments above -->
+ <test name="family" compare="eq">
+ <string>SubpixelPositioningAhem</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>ahem</string>
+ </edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" compare="eq">
+ <string>SlightHintedTimesNewRoman</string>
+ </test>
+ <edit name="family" mode="assign">
+ <string>Tinos</string>
+ </edit>
+ <edit name="hintstyle" mode="assign">
+ <const>hintslight</const>
+ </edit>
+ </match>
+
+ <!-- When we encounter a character that the current font doesn't
+ support, gfx::GetFallbackFontForChar() returns the first font
+ that does have a glyph for the character. The list of fonts is
+ sorted by a pattern that includes the current locale, but doesn't
+ include a font family (which means that the fallback font depends
+ on the locale but not on the current font).
+
+ DejaVu Sans is commonly the only font that supports some
+ characters, such as "⇧", and even when other candidates are
+ available, DejaVu Sans is commonly first among them, because of
+ the way Fontconfig is ordinarily configured. For example, the
+      configuration in the Fontconfig source lists DejaVu Sans under the
+ sans-serif generic family, and appends sans-serif to patterns
+ that don't already include a generic family (such as the pattern
+ in gfx::GetFallbackFontForChar()).
+
+ To get the same fallback font in the layout tests, we could
+ duplicate this configuration here, or more directly, simply
+ append DejaVu Sans to all patterns. -->
+ <match target="pattern">
+ <edit name="family" mode="append_last">
+ <string>DejaVu Sans</string>
+ </edit>
+ </match>
+
+</fontconfig>
+)";
+
+} // namespace
+
+void SetUpFontconfig() {
+ // TODO(thomasanderson): Use FONTCONFIG_SYSROOT to avoid having to write
+ // a new fonts.conf with updated paths.
+ std::unique_ptr<Environment> env = Environment::Create();
+ if (!env->HasVar("FONTCONFIG_FILE")) {
+ // fonts.conf must be generated on-the-fly since it contains absolute paths
+ // which may be different if
+ // 1. The user moves/renames their build directory (or any parent dirs).
+ // 2. The build directory is mapped on a swarming bot at a location
+ // different from the one the buildbot used.
+ FilePath dir_module;
+ PathService::Get(DIR_MODULE, &dir_module);
+ FilePath font_cache = dir_module.Append("fontconfig_caches");
+ FilePath test_fonts = dir_module.Append("test_fonts");
+ std::string fonts_conf = ReplaceStringPlaceholders(
+ kFontsConfTemplate, {font_cache.value(), test_fonts.value()}, nullptr);
+
+ // Write the data to a different file and then atomically rename it to
+ // fonts.conf. This avoids the file being in a bad state when different
+ // parallel tests call this function at the same time.
+ FilePath fonts_conf_file_temp;
+    if (!CreateTemporaryFileInDir(dir_module, &fonts_conf_file_temp))
+ CHECK(CreateTemporaryFile(&fonts_conf_file_temp));
+ CHECK(
+ WriteFile(fonts_conf_file_temp, fonts_conf.c_str(), fonts_conf.size()));
+ FilePath fonts_conf_file = dir_module.Append("fonts.conf");
+ if (ReplaceFile(fonts_conf_file_temp, fonts_conf_file, nullptr))
+ env->SetVar("FONTCONFIG_FILE", fonts_conf_file.value());
+ else
+ env->SetVar("FONTCONFIG_FILE", fonts_conf_file_temp.value());
+ }
+
+ CHECK(FcInit());
+}
+
+void TearDownFontconfig() {
+ FcFini();
+}
+
+} // namespace base
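
A minimal illustrative sketch, not part of the patch: one way a Linux test binary could pair the helpers above. The ScopedTestFontconfig wrapper name is hypothetical; only base::SetUpFontconfig() and base::TearDownFontconfig() come from the code above.

#include "base/test/fontconfig_util_linux.h"

namespace {

// Hypothetical RAII wrapper: Fontconfig is configured for the lifetime of the
// object, so every test in the binary sees the same generated fonts.conf.
class ScopedTestFontconfig {
 public:
  ScopedTestFontconfig() { base::SetUpFontconfig(); }
  ~ScopedTestFontconfig() { base::TearDownFontconfig(); }
};

}  // namespace
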
diff --git a/chromium/base/test/fontconfig_util_linux.h b/chromium/base/test/fontconfig_util_linux.h
new file mode 100644
index 00000000000..3122526d6e8
--- /dev/null
+++ b/chromium/base/test/fontconfig_util_linux.h
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
+#define BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
+
+namespace base {
+
+// Initializes Fontconfig with a custom configuration suitable for tests.
+void SetUpFontconfig();
+
+// Deinitializes Fontconfig.
+void TearDownFontconfig();
+
+} // namespace base
+
+#endif // BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
diff --git a/chromium/base/third_party/symbolize/symbolize.cc b/chromium/base/third_party/symbolize/symbolize.cc
index e6fbb84fd4d..16cb80c77e4 100644
--- a/chromium/base/third_party/symbolize/symbolize.cc
+++ b/chromium/base/third_party/symbolize/symbolize.cc
@@ -778,8 +778,11 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
out_size - 1);
}
+#if defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
+ {
+ FileDescriptor wrapped_object_fd(object_fd);
+#else
// Check whether a file name was returned.
-#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
if (object_fd < 0) {
#endif
if (out[1]) {
@@ -795,9 +798,7 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
}
// Failed to determine the object file containing PC. Bail out.
return false;
-#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
}
-#endif
FileDescriptor wrapped_object_fd(object_fd);
int elf_type = FileGetElfType(wrapped_object_fd.get());
if (elf_type == -1) {
diff --git a/chromium/base/threading/OWNERS b/chromium/base/threading/OWNERS
deleted file mode 100644
index 4198e99c5cc..00000000000
--- a/chromium/base/threading/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# For thread_resrictions.*
-jam@chromium.org
diff --git a/chromium/base/threading/platform_thread_unittest.cc b/chromium/base/threading/platform_thread_unittest.cc
index 7eea22e6b2b..10c45bccf1a 100644
--- a/chromium/base/threading/platform_thread_unittest.cc
+++ b/chromium/base/threading/platform_thread_unittest.cc
@@ -259,7 +259,13 @@ class ThreadPriorityTestThread : public FunctionTestThread {
// Test changing a created thread's priority (which has different semantics on
// some platforms).
-TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851759): Thread priorities are not implemented in Fuchsia.
+#define MAYBE_ThreadPriorityCurrentThread DISABLED_ThreadPriorityCurrentThread
+#else
+#define MAYBE_ThreadPriorityCurrentThread ThreadPriorityCurrentThread
+#endif
+TEST(PlatformThreadTest, MAYBE_ThreadPriorityCurrentThread) {
const bool increase_priority_allowed =
PlatformThread::CanIncreaseCurrentThreadPriority();
diff --git a/chromium/base/threading/simple_thread.cc b/chromium/base/threading/simple_thread.cc
index 04a5285e939..4b260627d96 100644
--- a/chromium/base/threading/simple_thread.cc
+++ b/chromium/base/threading/simple_thread.cc
@@ -53,7 +53,7 @@ void SimpleThread::StartAsync() {
&thread_, options_.priority)
: PlatformThread::CreateNonJoinableWithPriority(
options_.stack_size, this, options_.priority);
- DCHECK(success);
+ CHECK(success);
}
PlatformThreadId SimpleThread::tid() {
diff --git a/chromium/base/threading/thread_local_storage.h b/chromium/base/threading/thread_local_storage.h
index f84ac33f84a..9cb5aeb07b8 100644
--- a/chromium/base/threading/thread_local_storage.h
+++ b/chromium/base/threading/thread_local_storage.h
@@ -19,13 +19,21 @@
#endif
namespace heap_profiling {
-class ScopedAllowLogging;
+class ScopedAllowAlloc;
} // namespace heap_profiling
+namespace ui {
+class TLSDestructionCheckerForX11;
+}
+
namespace base {
class SamplingHeapProfiler;
+namespace debug {
+class GlobalActivityTracker;
+} // namespace debug
+
namespace trace_event {
class MallocDumpProvider;
} // namespace trace_event
@@ -156,7 +164,9 @@ class BASE_EXPORT ThreadLocalStorage {
friend class base::SamplingHeapProfiler;
friend class base::internal::ThreadLocalStorageTestInternal;
friend class base::trace_event::MallocDumpProvider;
- friend class heap_profiling::ScopedAllowLogging;
+ friend class debug::GlobalActivityTracker;
+ friend class heap_profiling::ScopedAllowAlloc;
+ friend class ui::TLSDestructionCheckerForX11;
static bool HasBeenDestroyed();
DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
diff --git a/chromium/base/threading/thread_perftest.cc b/chromium/base/threading/thread_perftest.cc
index bf890496645..d3fad972f8a 100644
--- a/chromium/base/threading/thread_perftest.cc
+++ b/chromium/base/threading/thread_perftest.cc
@@ -162,7 +162,10 @@ class TaskObserverPerfTest : public TaskPerfTest {
void Init() override {
TaskPerfTest::Init();
for (size_t i = 0; i < threads_.size(); i++) {
- threads_[i]->message_loop()->AddTaskObserver(&message_loop_observer);
+ threads_[i]->message_loop()->task_runner()->PostTask(
+ FROM_HERE, BindOnce(&MessageLoop::AddTaskObserver,
+ Unretained(threads_[i]->message_loop()),
+ Unretained(&message_loop_observer)));
}
}
};
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index 633bcb26e6e..36c64b51be9 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -31,7 +31,7 @@ void AssertBlockingAllowed() {
"blocking! If this task is running inside the TaskScheduler, it needs "
"to have MayBlock() in its TaskTraits. Otherwise, consider making "
"this blocking work asynchronous or, as a last resort, you may use "
- "ScopedAllowBlocking in a narrow scope.";
+ "ScopedAllowBlocking (see its documentation for best practices).";
}
void DisallowBlocking() {
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 57f2f21e9ee..705ba4ddee2 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -40,14 +40,15 @@ class BrowserGpuMemoryBufferManager;
class BrowserMainLoop;
class BrowserProcessSubThread;
class BrowserShutdownProfileDumper;
-class BrowserSurfaceViewManager;
class BrowserTestBase;
class CategorizedWorkerPool;
+class GpuProcessTransportFactory;
class NestedMessagePumpAndroid;
class ScopedAllowWaitForAndroidLayoutTests;
class ScopedAllowWaitForDebugURL;
class SessionStorageDatabase;
class SoftwareOutputDeviceMus;
+class ServiceWorkerSubresourceLoader;
class SynchronousCompositor;
class SynchronousCompositorHost;
class SynchronousCompositorSyncCallBridge;
@@ -83,7 +84,7 @@ class TaskService; // https://crbug.com/796830
namespace mojo {
class CoreLibraryInitializer;
class SyncCallRestrictions;
-namespace edk {
+namespace core {
class ScopedIPCSupport;
}
}
@@ -129,7 +130,7 @@ class ScreenMus;
}
namespace viz {
-class ServerGpuMemoryBufferManager;
+class HostGpuMemoryBufferManager;
}
namespace webrtc {
@@ -213,6 +214,35 @@ class BASE_EXPORT ScopedDisallowBlocking {
//
// Avoid using this. Prefer making blocking calls from tasks posted to
// base::TaskScheduler with base::MayBlock().
+//
+// Where unavoidable, put ScopedAllow* instances in the narrowest scope possible
+// in the caller making the blocking call but no further down. That is: if a
+// Cleanup() method needs to do a blocking call, document Cleanup() as blocking
+// and add a ScopedAllowBlocking instance in callers that can't avoid making
+// this call from a context where blocking is banned, as such:
+// void Client::MyMethod() {
+// (...)
+// {
+// // Blocking is okay here because XYZ.
+// ScopedAllowBlocking allow_blocking;
+// my_foo_->Cleanup();
+// }
+// (...)
+// }
+//
+// // This method can block.
+// void Foo::Cleanup() {
+// // Do NOT add the ScopedAllowBlocking in Cleanup() directly as that hides
+// // its blocking nature from unknowing callers and defeats the purpose of
+// // these checks.
+// FlushStateToDisk();
+// }
+//
+// Note: In rare situations where the blocking call is an implementation detail
+// (i.e. the impl makes a call that invokes AssertBlockingAllowed() but it
+// somehow knows that in practice this will not block), it might be okay to hide
+// the ScopedAllowBlocking instance in the impl with a comment explaining why
+// that's okay.
class BASE_EXPORT ScopedAllowBlocking {
private:
// This can only be instantiated by friends. Use ScopedAllowBlockingForTesting
@@ -220,6 +250,7 @@ class BASE_EXPORT ScopedAllowBlocking {
FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
friend class android_webview::ScopedAllowInitGLBindings;
friend class content::BrowserProcessSubThread;
+ friend class content::GpuProcessTransportFactory;
friend class cronet::CronetPrefsManager;
friend class cronet::CronetURLRequestContext;
friend class media::AudioInputDevice;
@@ -292,10 +323,13 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
friend class functions::ExecScriptScopedAllowBaseSyncPrimitives;
friend class leveldb::LevelDBMojoProxy;
friend class media::BlockingUrlProtocol;
+ friend class mojo::core::ScopedIPCSupport;
friend class net::MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
friend class rlz_lib::FinancialPing;
friend class shell_integration::LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
friend class webrtc::DesktopConfigurationMonitor;
+ friend class content::ServiceWorkerSubresourceLoader;
+ friend class viz::HostGpuMemoryBufferManager;
ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
~ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
@@ -416,14 +450,13 @@ class BASE_EXPORT ThreadRestrictions {
#endif
private:
- // DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
+ // DO NOT ADD ANY OTHER FRIEND STATEMENTS.
// BEGIN ALLOWED USAGE.
friend class android_webview::AwFormDatabaseService;
friend class android_webview::CookieManager;
friend class base::StackSamplingProfiler;
friend class content::BrowserMainLoop;
friend class content::BrowserShutdownProfileDumper;
- friend class content::BrowserSurfaceViewManager;
friend class content::BrowserTestBase;
friend class content::NestedMessagePumpAndroid;
friend class content::ScopedAllowWaitForAndroidLayoutTests;
@@ -442,7 +475,6 @@ class BASE_EXPORT ThreadRestrictions {
friend class PlatformThread;
friend class android::JavaHandlerThread;
friend class mojo::SyncCallRestrictions;
- friend class mojo::edk::ScopedIPCSupport;
friend class ui::CommandBufferClientImpl;
friend class ui::CommandBufferLocal;
friend class ui::GpuState;
@@ -469,7 +501,6 @@ class BASE_EXPORT ThreadRestrictions {
friend class content::SoftwareOutputDeviceMus; // Interim non-production code
#endif
friend class views::ScreenMus;
- friend class viz::ServerGpuMemoryBufferManager;
// END USAGE THAT NEEDS TO BE FIXED.
#if DCHECK_IS_ON()
@@ -481,8 +512,7 @@ class BASE_EXPORT ThreadRestrictions {
// Constructing a ScopedAllowWait temporarily allows waiting on the current
// thread. Doing this is almost always incorrect, which is why we limit who
- // can use this through friend. If you find yourself needing to use this, find
- // another way. Talk to jam or brettw.
+ // can use this through friend.
//
// DEPRECATED. Use ScopedAllowBaseSyncPrimitives.
class BASE_EXPORT ScopedAllowWait {
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index 63270f29a6d..2c106e54f2f 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -989,14 +989,7 @@ TEST(ThreadTicks, MAYBE_NowOverride) {
EXPECT_GT(ThreadTicks::Max(), subtle::ThreadTicksNowIgnoringOverride());
}
-// Fails frequently on Android http://crbug.com/352633 with:
-// Expected: (delta_thread.InMicroseconds()) > (0), actual: 0 vs 0
-#if defined(OS_ANDROID)
-#define MAYBE_ThreadNow DISABLED_ThreadNow
-#else
-#define MAYBE_ThreadNow ThreadNow
-#endif
-TEST(ThreadTicks, MAYBE_ThreadNow) {
+TEST(ThreadTicks, ThreadNow) {
if (ThreadTicks::IsSupported()) {
ThreadTicks::WaitUntilInitialized();
TimeTicks begin = TimeTicks::Now();
@@ -1010,7 +1003,7 @@ TEST(ThreadTicks, MAYBE_ThreadNow) {
TimeDelta delta = end - begin;
TimeDelta delta_thread = end_thread - begin_thread;
// Make sure that some thread time have elapsed.
- EXPECT_GT(delta_thread.InMicroseconds(), 0);
+ EXPECT_GE(delta_thread.InMicroseconds(), 0);
// But the thread time is at least 9ms less than clock time.
TimeDelta difference = delta - delta_thread;
EXPECT_GE(difference.InMicroseconds(), 9000);
diff --git a/chromium/base/time/time_win_unittest.cc b/chromium/base/time/time_win_unittest.cc
index 24cd7313dfe..4d712544f0a 100644
--- a/chromium/base/time/time_win_unittest.cc
+++ b/chromium/base/time/time_win_unittest.cc
@@ -294,9 +294,15 @@ TEST(TimeTicks, FromQPCValue) {
double epsilon = nextafter(expected_microseconds_since_origin, INFINITY) -
expected_microseconds_since_origin;
// Epsilon must be at least 1.0 because converted_microseconds_since_origin
- // comes from an integral value and the rounding is not perfect.
- if (epsilon < 1.0)
- epsilon = 1.0;
+ // comes from an integral value, and expected_microseconds_since_origin is
+ // a double that is expected to be up to 0.999 larger. In addition, due to
+ // multiple roundings in the double calculation the actual error can be
+ // slightly larger than 1.0, even when the converted value is perfect. This
+ // epsilon value was chosen because it is slightly larger than the error
+ // seen in a test failure caused by the double rounding.
+ const double min_epsilon = 1.002;
+ if (epsilon < min_epsilon)
+ epsilon = min_epsilon;
EXPECT_NEAR(expected_microseconds_since_origin,
converted_microseconds_since_origin, epsilon)
<< "ticks=" << ticks << ", to be converted via logic path: "
diff --git a/chromium/base/timer/mock_timer.cc b/chromium/base/timer/mock_timer.cc
index ca0893ba0a1..e55bf144486 100644
--- a/chromium/base/timer/mock_timer.cc
+++ b/chromium/base/timer/mock_timer.cc
@@ -4,56 +4,80 @@
#include "base/timer/mock_timer.h"
+#include "base/test/test_simple_task_runner.h"
+
namespace base {
-MockTimer::MockTimer(bool retain_user_task, bool is_repeating)
- : Timer(retain_user_task, is_repeating),
- is_running_(false) {
+namespace {
+
+void FlushPendingTasks(TestSimpleTaskRunner* task_runner) {
+  // Do not use TestSimpleTaskRunner::RunPendingTasks() here: it overrides
+  // ThreadTaskRunnerHandle while it runs tasks, so tasks that a timer's
+  // callback posts to ThreadTaskRunnerHandle would end up on
+  // |test_task_runner_| instead of the original task runner.
+ for (TestPendingTask& task : task_runner->TakePendingTasks())
+ std::move(task.task).Run();
+}
+
+} // namespace
+
+MockOneShotTimer::MockOneShotTimer()
+ : OneShotTimer(&clock_),
+ test_task_runner_(MakeRefCounted<TestSimpleTaskRunner>()) {
+ OneShotTimer::SetTaskRunner(test_task_runner_);
}
-MockTimer::MockTimer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating)
- : Timer(true, is_repeating), delay_(delay), is_running_(false) {}
+MockOneShotTimer::~MockOneShotTimer() = default;
-MockTimer::~MockTimer() = default;
+void MockOneShotTimer::SetTaskRunner(
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ NOTREACHED() << "MockOneShotTimer doesn't support SetTaskRunner().";
+}
-bool MockTimer::IsRunning() const {
- return is_running_;
+void MockOneShotTimer::Fire() {
+ DCHECK(IsRunning());
+ clock_.Advance(std::max(TimeDelta(), desired_run_time() - clock_.NowTicks()));
+ FlushPendingTasks(test_task_runner_.get());
}
-base::TimeDelta MockTimer::GetCurrentDelay() const {
- return delay_;
+MockRepeatingTimer::MockRepeatingTimer()
+ : RepeatingTimer(&clock_),
+ test_task_runner_(MakeRefCounted<TestSimpleTaskRunner>()) {
+ RepeatingTimer::SetTaskRunner(test_task_runner_);
}
-void MockTimer::Start(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task) {
- delay_ = delay;
- user_task_ = user_task;
- Reset();
+MockRepeatingTimer::~MockRepeatingTimer() = default;
+
+void MockRepeatingTimer::SetTaskRunner(
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ NOTREACHED() << "MockRepeatingTimer doesn't support SetTaskRunner().";
}
-void MockTimer::Stop() {
- is_running_ = false;
- if (!retain_user_task())
- user_task_.Reset();
+void MockRepeatingTimer::Fire() {
+ DCHECK(IsRunning());
+ clock_.Advance(std::max(TimeDelta(), desired_run_time() - clock_.NowTicks()));
+ FlushPendingTasks(test_task_runner_.get());
}
-void MockTimer::Reset() {
- DCHECK(!user_task_.is_null());
- is_running_ = true;
+MockRetainingOneShotTimer::MockRetainingOneShotTimer()
+ : RetainingOneShotTimer(&clock_),
+ test_task_runner_(MakeRefCounted<TestSimpleTaskRunner>()) {
+ RetainingOneShotTimer::SetTaskRunner(test_task_runner_);
+}
+
+MockRetainingOneShotTimer::~MockRetainingOneShotTimer() = default;
+
+void MockRetainingOneShotTimer::SetTaskRunner(
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ NOTREACHED() << "MockRetainingOneShotTimer doesn't support SetTaskRunner().";
}
-void MockTimer::Fire() {
- DCHECK(is_running_);
- base::Closure old_task = user_task_;
- if (is_repeating())
- Reset();
- else
- Stop();
- old_task.Run();
+void MockRetainingOneShotTimer::Fire() {
+ DCHECK(IsRunning());
+ clock_.Advance(std::max(TimeDelta(), desired_run_time() - clock_.NowTicks()));
+ FlushPendingTasks(test_task_runner_.get());
}
} // namespace base
diff --git a/chromium/base/timer/mock_timer.h b/chromium/base/timer/mock_timer.h
index 49394b2d9c2..30a605e99ab 100644
--- a/chromium/base/timer/mock_timer.h
+++ b/chromium/base/timer/mock_timer.h
@@ -5,35 +5,70 @@
#ifndef BASE_TIMER_MOCK_TIMER_H_
#define BASE_TIMER_MOCK_TIMER_H_
+#include "base/test/simple_test_tick_clock.h"
#include "base/timer/timer.h"
namespace base {
-class BASE_EXPORT MockTimer : public Timer {
+class TestSimpleTaskRunner;
+
+// A mock implementation of base::OneShotTimer which requires being explicitly
+// Fire()'d.
+// Prefer using ScopedTaskEnvironment::MOCK_TIME + FastForward*() to this when
+// possible.
+class MockOneShotTimer : public OneShotTimer {
+ public:
+ MockOneShotTimer();
+ ~MockOneShotTimer() override;
+
+ // Testing method.
+ void Fire();
+
+ private:
+ // Timer implementation.
+ // MockOneShotTimer doesn't support SetTaskRunner. Do not use this.
+ void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) override;
+
+ SimpleTestTickClock clock_;
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner_;
+};
+
+// See MockOneShotTimer's comment. Prefer using
+// ScopedTaskEnvironment::MOCK_TIME.
+class MockRepeatingTimer : public RepeatingTimer {
public:
- MockTimer(bool retain_user_task, bool is_repeating);
- MockTimer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating);
- ~MockTimer() override;
-
- // base::Timer implementation.
- bool IsRunning() const override;
- base::TimeDelta GetCurrentDelay() const override;
- void Start(const Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task) override;
- void Stop() override;
- void Reset() override;
-
- // Testing methods.
+ MockRepeatingTimer();
+ ~MockRepeatingTimer() override;
+
+ // Testing method.
void Fire();
private:
- base::Closure user_task_;
- TimeDelta delay_;
- bool is_running_;
+ // Timer implementation.
+ // MockRepeatingTimer doesn't support SetTaskRunner. Do not use this.
+ void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) override;
+
+ SimpleTestTickClock clock_;
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner_;
+};
+
+// See MockOneShotTimer's comment. Prefer using
+// ScopedTaskEnvironment::MOCK_TIME.
+class MockRetainingOneShotTimer : public RetainingOneShotTimer {
+ public:
+ MockRetainingOneShotTimer();
+ ~MockRetainingOneShotTimer() override;
+
+ // Testing method.
+ void Fire();
+
+ private:
+ // Timer implementation.
+ // MockRetainingOneShotTimer doesn't support SetTaskRunner. Do not use this.
+ void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) override;
+
+ SimpleTestTickClock clock_;
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner_;
};
} // namespace base
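
A short usage sketch, not part of the patch, of the new MockOneShotTimer API above in a gtest-style test; the WidgetTest/SetFlag names are hypothetical, only base::MockOneShotTimer, Start() and Fire() come from the code above.

#include "base/bind.h"
#include "base/timer/mock_timer.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace {
void SetFlag(bool* flag) { *flag = true; }
}  // namespace

TEST(WidgetTest, TimeoutRunsCallback) {
  bool fired = false;
  base::MockOneShotTimer timer;
  timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(5),
              base::Bind(&SetFlag, &fired));
  EXPECT_TRUE(timer.IsRunning());
  // Advances the mock tick clock to the desired run time and runs the
  // pending task synchronously; no MessageLoop is needed because the mock
  // installs its own TestSimpleTaskRunner.
  timer.Fire();
  EXPECT_TRUE(fired);
  EXPECT_FALSE(timer.IsRunning());
}
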
diff --git a/chromium/base/timer/mock_timer_unittest.cc b/chromium/base/timer/mock_timer_unittest.cc
index 61716a4731f..45d0388e659 100644
--- a/chromium/base/timer/mock_timer_unittest.cc
+++ b/chromium/base/timer/mock_timer_unittest.cc
@@ -15,7 +15,7 @@ void CallMeMaybe(int *number) {
TEST(MockTimerTest, FiresOnce) {
int calls = 0;
- base::MockTimer timer(false, false);
+ base::MockOneShotTimer timer;
base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
timer.Start(FROM_HERE, delay,
base::Bind(&CallMeMaybe,
@@ -29,7 +29,7 @@ TEST(MockTimerTest, FiresOnce) {
TEST(MockTimerTest, FiresRepeatedly) {
int calls = 0;
- base::MockTimer timer(true, true);
+ base::MockRepeatingTimer timer;
base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
timer.Start(FROM_HERE, delay,
base::Bind(&CallMeMaybe,
@@ -44,7 +44,7 @@ TEST(MockTimerTest, FiresRepeatedly) {
TEST(MockTimerTest, Stops) {
int calls = 0;
- base::MockTimer timer(true, true);
+ base::MockRepeatingTimer timer;
base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
timer.Start(FROM_HERE, delay,
base::Bind(&CallMeMaybe,
@@ -66,7 +66,7 @@ class HasWeakPtr : public base::SupportsWeakPtr<HasWeakPtr> {
TEST(MockTimerTest, DoesNotRetainClosure) {
HasWeakPtr *has_weak_ptr = new HasWeakPtr();
base::WeakPtr<HasWeakPtr> weak_ptr(has_weak_ptr->AsWeakPtr());
- base::MockTimer timer(false, false);
+ base::MockOneShotTimer timer;
base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
ASSERT_TRUE(weak_ptr.get());
timer.Start(FROM_HERE, delay,
diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc
index 99cd83933aa..aa2e0d4ff66 100644
--- a/chromium/base/timer/timer.cc
+++ b/chromium/base/timer/timer.cc
@@ -16,6 +16,7 @@
#include "base/time/tick_clock.h"
namespace base {
+namespace internal {
// BaseTimerTaskInternal is a simple delegate for scheduling a callback to Timer
// on the current sequence. It also handles the following edge cases:
@@ -23,9 +24,7 @@ namespace base {
// - abandoned (orphaned) by Timer.
class BaseTimerTaskInternal {
public:
- explicit BaseTimerTaskInternal(Timer* timer)
- : timer_(timer) {
- }
+ explicit BaseTimerTaskInternal(TimerBase* timer) : timer_(timer) {}
~BaseTimerTaskInternal() {
// This task may be getting cleared because the task runner has been
@@ -45,7 +44,7 @@ class BaseTimerTaskInternal {
// Although Timer should not call back into |this|, let's clear |timer_|
// first to be pedantic.
- Timer* timer = timer_;
+ TimerBase* timer = timer_;
timer_ = nullptr;
timer->RunScheduledTask();
}
@@ -54,17 +53,17 @@ class BaseTimerTaskInternal {
void Abandon() { timer_ = nullptr; }
private:
- Timer* timer_;
+ TimerBase* timer_;
DISALLOW_COPY_AND_ASSIGN(BaseTimerTaskInternal);
};
-Timer::Timer(bool retain_user_task, bool is_repeating)
- : Timer(retain_user_task, is_repeating, nullptr) {}
+TimerBase::TimerBase(bool retain_user_task, bool is_repeating)
+ : TimerBase(retain_user_task, is_repeating, nullptr) {}
-Timer::Timer(bool retain_user_task,
- bool is_repeating,
- const TickClock* tick_clock)
+TimerBase::TimerBase(bool retain_user_task,
+ bool is_repeating,
+ const TickClock* tick_clock)
: scheduled_task_(nullptr),
is_repeating_(is_repeating),
retain_user_task_(retain_user_task),
@@ -77,17 +76,17 @@ Timer::Timer(bool retain_user_task,
origin_sequence_checker_.DetachFromSequence();
}
-Timer::Timer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating)
- : Timer(posted_from, delay, user_task, is_repeating, nullptr) {}
-
-Timer::Timer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating,
- const TickClock* tick_clock)
+TimerBase::TimerBase(const Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating)
+ : TimerBase(posted_from, delay, user_task, is_repeating, nullptr) {}
+
+TimerBase::TimerBase(const Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ const TickClock* tick_clock)
: scheduled_task_(nullptr),
posted_from_(posted_from),
delay_(delay),
@@ -100,22 +99,22 @@ Timer::Timer(const Location& posted_from,
origin_sequence_checker_.DetachFromSequence();
}
-Timer::~Timer() {
+TimerBase::~TimerBase() {
DCHECK(origin_sequence_checker_.CalledOnValidSequence());
AbandonAndStop();
}
-bool Timer::IsRunning() const {
+bool TimerBase::IsRunning() const {
DCHECK(origin_sequence_checker_.CalledOnValidSequence());
return is_running_;
}
-TimeDelta Timer::GetCurrentDelay() const {
+TimeDelta TimerBase::GetCurrentDelay() const {
DCHECK(origin_sequence_checker_.CalledOnValidSequence());
return delay_;
}
-void Timer::SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) {
+void TimerBase::SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) {
// Do not allow changing the task runner when the Timer is running.
// Don't check for |origin_sequence_checker_.CalledOnValidSequence()| here to
// allow the use case of constructing the Timer and immediatetly invoking
@@ -127,9 +126,9 @@ void Timer::SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) {
task_runner_.swap(task_runner);
}
-void Timer::Start(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task) {
+void TimerBase::Start(const Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task) {
DCHECK(origin_sequence_checker_.CalledOnValidSequence());
posted_from_ = posted_from;
@@ -139,7 +138,7 @@ void Timer::Start(const Location& posted_from,
Reset();
}
-void Timer::Stop() {
+void TimerBase::Stop() {
// TODO(gab): Enable this when it's no longer called racily from
// RunScheduledTask(): https://crbug.com/587199.
// DCHECK(origin_sequence_checker_.CalledOnValidSequence());
@@ -155,7 +154,7 @@ void Timer::Stop() {
// |user_task_|.
}
-void Timer::Reset() {
+void TimerBase::Reset() {
DCHECK(origin_sequence_checker_.CalledOnValidSequence());
DCHECK(!user_task_.is_null());
@@ -183,14 +182,14 @@ void Timer::Reset() {
PostNewScheduledTask(delay_);
}
-TimeTicks Timer::Now() const {
+TimeTicks TimerBase::Now() const {
// TODO(gab): Enable this when it's no longer called racily from
// RunScheduledTask(): https://crbug.com/587199.
// DCHECK(origin_sequence_checker_.CalledOnValidSequence());
return tick_clock_ ? tick_clock_->NowTicks() : TimeTicks::Now();
}
-void Timer::PostNewScheduledTask(TimeDelta delay) {
+void TimerBase::PostNewScheduledTask(TimeDelta delay) {
// TODO(gab): Enable this when it's no longer called racily from
// RunScheduledTask(): https://crbug.com/587199.
// DCHECK(origin_sequence_checker_.CalledOnValidSequence());
@@ -214,11 +213,11 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
}
}
-scoped_refptr<SequencedTaskRunner> Timer::GetTaskRunner() {
+scoped_refptr<SequencedTaskRunner> TimerBase::GetTaskRunner() {
return task_runner_.get() ? task_runner_ : SequencedTaskRunnerHandle::Get();
}
-void Timer::AbandonScheduledTask() {
+void TimerBase::AbandonScheduledTask() {
// TODO(gab): Enable this when it's no longer called racily from
// RunScheduledTask() -> Stop(): https://crbug.com/587199.
// DCHECK(origin_sequence_checker_.CalledOnValidSequence());
@@ -228,7 +227,7 @@ void Timer::AbandonScheduledTask() {
}
}
-void Timer::RunScheduledTask() {
+void TimerBase::RunScheduledTask() {
// TODO(gab): Enable this when it's no longer called racily:
// https://crbug.com/587199.
// DCHECK(origin_sequence_checker_.CalledOnValidSequence());
@@ -265,4 +264,17 @@ void Timer::RunScheduledTask() {
// No more member accesses here: |this| could be deleted at this point.
}
+} // namespace internal
+
+void OneShotTimer::FireNow() {
+ DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+ DCHECK(!task_runner_) << "FireNow() is incompatible with SetTaskRunner()";
+ DCHECK(IsRunning());
+
+ OnceClosure task = user_task();
+ Stop();
+ DCHECK(!user_task());
+ std::move(task).Run();
+}
+
} // namespace base
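
A small sketch, not part of the patch, of the new OneShotTimer::FireNow() added above; it assumes code already running on a sequence with a task runner, and the DeferredWorker/DoExpensiveWork names are hypothetical. FireNow() requires a running timer and the default task runner (no SetTaskRunner()), per the DCHECKs above.

#include "base/bind.h"
#include "base/timer/timer.h"

namespace {

void DoExpensiveWork() {}  // Hypothetical deferred task.

// Schedules DoExpensiveWork() to run after a minute, but lets callers force
// the retained task to run immediately instead of waiting out the delay.
class DeferredWorker {
 public:
  void Schedule() {
    timer_.Start(FROM_HERE, base::TimeDelta::FromMinutes(1),
                 base::Bind(&DoExpensiveWork));
  }
  void RunNow() {
    if (timer_.IsRunning())
      timer_.FireNow();  // Runs the task now and stops the timer.
  }

 private:
  base::OneShotTimer timer_;
};

}  // namespace
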
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index 27776326d79..3f666b6375c 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -2,16 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// OneShotTimer and RepeatingTimer provide a simple timer API. As the names
-// suggest, OneShotTimer calls you back once after a time delay expires.
+// OneShotTimer, RepeatingTimer and RetainingOneShotTimer provide a simple timer
+// API. As the names suggest, OneShotTimer calls you back once after a time
+// delay expires.
// RepeatingTimer on the other hand calls you back periodically with the
// prescribed time interval.
+// RetainingOneShotTimer, like OneShotTimer, doesn't repeat the task, but it
+// retains the given task after the timeout, so you can restart it with Reset()
+// without passing a new task to Start().
//
-// OneShotTimer and RepeatingTimer both cancel the timer when they go out of
-// scope, which makes it easy to ensure that you do not get called when your
-// object has gone out of scope. Just instantiate a OneShotTimer or
-// RepeatingTimer as a member variable of the class for which you wish to
-// receive timer events.
+// All of OneShotTimer, RepeatingTimer and RetainingOneShotTimer cancel the
+// timer when they go out of scope, which makes it easy to ensure that you do
+// not get called when your object has gone out of scope. Just instantiate a
+// timer as a member variable of the class for which you wish to receive timer
+// events.
//
// Sample RepeatingTimer usage:
//
@@ -32,12 +36,11 @@
// base::RepeatingTimer timer_;
// };
//
-// Both OneShotTimer and RepeatingTimer also support a Reset method, which
-// allows you to easily defer the timer event until the timer delay passes once
-// again. So, in the above example, if 0.5 seconds have already passed,
-// calling Reset on |timer_| would postpone DoStuff by another 1 second. In
-// other words, Reset is shorthand for calling Stop and then Start again with
-// the same arguments.
+// Timers also support a Reset method, which allows you to easily defer the
+// timer event until the timer delay passes once again. So, in the above
+// example, if 0.5 seconds have already passed, calling Reset on |timer_|
+// would postpone DoStuff by another 1 second. In other words, Reset is
+// shorthand for calling Stop and then Start again with the same arguments.
//
// These APIs are not thread safe. All methods must be called from the same
// sequence (not necessarily the construction sequence), except for the
@@ -75,48 +78,55 @@
namespace base {
-class BaseTimerTaskInternal;
class TickClock;
+namespace internal {
+
+class BaseTimerTaskInternal;
+
//-----------------------------------------------------------------------------
// This class wraps TaskRunner::PostDelayedTask to manage delayed and repeating
// tasks. See meta comment above for thread-safety requirements.
+// Do not use this class directly. Use one of OneShotTimer, RepeatingTimer or
+// RetainingOneShotTimer.
//
-class BASE_EXPORT Timer {
+class BASE_EXPORT TimerBase {
public:
// Construct a timer in repeating or one-shot mode. Start must be called later
// to set task info. |retain_user_task| determines whether the user_task is
// retained or reset when it runs or stops. If |tick_clock| is provided, it is
// used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
- Timer(bool retain_user_task, bool is_repeating);
- Timer(bool retain_user_task, bool is_repeating, const TickClock* tick_clock);
+ TimerBase(bool retain_user_task, bool is_repeating);
+ TimerBase(bool retain_user_task,
+ bool is_repeating,
+ const TickClock* tick_clock);
// Construct a timer with retained task info. If |tick_clock| is provided, it
// is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
- Timer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating);
- Timer(const Location& posted_from,
- TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating,
- const TickClock* tick_clock);
-
- virtual ~Timer();
+ TimerBase(const Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating);
+ TimerBase(const Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ const TickClock* tick_clock);
+
+ virtual ~TimerBase();
// Returns true if the timer is running (i.e., not stopped).
- virtual bool IsRunning() const;
+ bool IsRunning() const;
// Returns the current delay for this timer.
- virtual TimeDelta GetCurrentDelay() const;
+ TimeDelta GetCurrentDelay() const;
// Set the task runner on which the task should be scheduled. This method can
// only be called before any tasks have been scheduled. If |task_runner| runs
// tasks on a different sequence than the sequence owning this Timer,
// |user_task_| will be posted to it when the Timer fires (note that this
// means |user_task_| can run after ~Timer() and should support that).
- void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner);
+ virtual void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner);
// Start the timer to run at the given |delay| from now. If the timer is
// already running, it will be replaced to call the given |user_task|.
@@ -164,9 +174,15 @@ class BASE_EXPORT Timer {
void set_is_running(bool running) { is_running_ = running; }
const Location& posted_from() const { return posted_from_; }
- bool retain_user_task() const { return retain_user_task_; }
- bool is_repeating() const { return is_repeating_; }
- bool is_running() const { return is_running_; }
+
+ // The task runner on which the task should be scheduled. If it is null, the
+ // task runner for the current sequence will be used.
+ scoped_refptr<SequencedTaskRunner> task_runner_;
+
+ // Timer isn't thread-safe and must only be used on its origin sequence
+ // (sequence on which it was started). Once fully Stop()'ed it may be
+ // destroyed or restarted on another sequence.
+ SequenceChecker origin_sequence_checker_;
private:
friend class BaseTimerTaskInternal;
@@ -192,10 +208,6 @@ class BASE_EXPORT Timer {
// at |scheduled_run_time_|.
BaseTimerTaskInternal* scheduled_task_;
- // The task runner on which the task should be scheduled. If it is null, the
- // task runner for the current sequence will be used.
- scoped_refptr<SequencedTaskRunner> task_runner_;
-
// Location in user code.
Location posted_from_;
// Delay requested by user.
@@ -216,11 +228,6 @@ class BASE_EXPORT Timer {
// if the task must be run immediately.
TimeTicks desired_run_time_;
- // Timer isn't thread-safe and must only be used on its origin sequence
- // (sequence on which it was started). Once fully Stop()'ed it may be
- // destroyed or restarted on another sequence.
- SequenceChecker origin_sequence_checker_;
-
// Repeating timers automatically post the task again before calling the task
// callback.
const bool is_repeating_;
@@ -234,25 +241,69 @@ class BASE_EXPORT Timer {
// If true, |user_task_| is scheduled to run sometime in the future.
bool is_running_;
- DISALLOW_COPY_AND_ASSIGN(Timer);
+ DISALLOW_COPY_AND_ASSIGN(TimerBase);
};
+} // namespace internal
+
//-----------------------------------------------------------------------------
// A simple, one-shot timer. See usage notes at the top of the file.
-class OneShotTimer : public Timer {
+class BASE_EXPORT OneShotTimer : public internal::TimerBase {
public:
OneShotTimer() : OneShotTimer(nullptr) {}
explicit OneShotTimer(const TickClock* tick_clock)
- : Timer(false, false, tick_clock) {}
+ : internal::TimerBase(false, false, tick_clock) {}
+
+ // Run the scheduled task immediately, and stop the timer. The timer needs to
+ // be running.
+ void FireNow();
};
//-----------------------------------------------------------------------------
// A simple, repeating timer. See usage notes at the top of the file.
-class RepeatingTimer : public Timer {
+class RepeatingTimer : public internal::TimerBase {
public:
RepeatingTimer() : RepeatingTimer(nullptr) {}
explicit RepeatingTimer(const TickClock* tick_clock)
- : Timer(true, true, tick_clock) {}
+ : internal::TimerBase(true, true, tick_clock) {}
+
+ RepeatingTimer(const Location& posted_from,
+ TimeDelta delay,
+ RepeatingClosure user_task)
+ : internal::TimerBase(posted_from, delay, std::move(user_task), true) {}
+ RepeatingTimer(const Location& posted_from,
+ TimeDelta delay,
+ RepeatingClosure user_task,
+ const TickClock* tick_clock)
+ : internal::TimerBase(posted_from,
+ delay,
+ std::move(user_task),
+ true,
+ tick_clock) {}
+};
+
+//-----------------------------------------------------------------------------
+// A simple, one-shot timer with the retained user task. See usage notes at the
+// top of the file.
+class RetainingOneShotTimer : public internal::TimerBase {
+ public:
+ RetainingOneShotTimer() : RetainingOneShotTimer(nullptr) {}
+ explicit RetainingOneShotTimer(const TickClock* tick_clock)
+ : internal::TimerBase(true, false, tick_clock) {}
+
+ RetainingOneShotTimer(const Location& posted_from,
+ TimeDelta delay,
+ RepeatingClosure user_task)
+ : internal::TimerBase(posted_from, delay, std::move(user_task), false) {}
+ RetainingOneShotTimer(const Location& posted_from,
+ TimeDelta delay,
+ RepeatingClosure user_task,
+ const TickClock* tick_clock)
+ : internal::TimerBase(posted_from,
+ delay,
+ std::move(user_task),
+ false,
+ tick_clock) {}
};
//-----------------------------------------------------------------------------
@@ -266,7 +317,7 @@ class RepeatingTimer : public Timer {
//
// If destroyed, the timeout is canceled and will not occur even if already
// inflight.
-class DelayTimer : protected Timer {
+class DelayTimer {
public:
template <class Receiver>
DelayTimer(const Location& posted_from,
@@ -281,13 +332,17 @@ class DelayTimer : protected Timer {
Receiver* receiver,
void (Receiver::*method)(),
const TickClock* tick_clock)
- : Timer(posted_from,
- delay,
- base::Bind(method, base::Unretained(receiver)),
- false,
- tick_clock) {}
+ : timer_(posted_from,
+ delay,
+ BindRepeating(method, Unretained(receiver)),
+ tick_clock) {}
+
+ void Reset() { timer_.Reset(); }
+
+ private:
+ RetainingOneShotTimer timer_;
- using Timer::Reset;
+ DISALLOW_COPY_AND_ASSIGN(DelayTimer);
};
} // namespace base
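
A minimal sketch, not part of the patch, of the RetainingOneShotTimer pattern that the reworked DelayTimer above is built on; the IdleNotifier/OnIdle names are hypothetical, only the RetainingOneShotTimer constructor and Reset() come from the header above.

#include "base/bind.h"
#include "base/timer/timer.h"

namespace {

class IdleNotifier {
 public:
  IdleNotifier()
      : timer_(FROM_HERE, base::TimeDelta::FromSeconds(5),
               base::BindRepeating(&IdleNotifier::OnIdle,
                                   base::Unretained(this))) {}

  // Call on every user event: because the task is retained, Reset() simply
  // pushes the deadline back without passing the callback again.
  void OnUserActivity() { timer_.Reset(); }

 private:
  void OnIdle() { /* Hypothetical "user went idle" handling. */ }

  base::RetainingOneShotTimer timer_;
};

}  // namespace
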
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index aaab237d11f..3ebf301347c 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -603,7 +603,7 @@ void TimerTestCallback() {
TEST(TimerTest, NonRepeatIsRunning) {
{
MessageLoop loop;
- Timer timer(false, false);
+ OneShotTimer timer;
EXPECT_FALSE(timer.IsRunning());
timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
@@ -613,7 +613,7 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
{
- Timer timer(true, false);
+ RetainingOneShotTimer timer;
MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
@@ -627,7 +627,7 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
TEST(TimerTest, NonRepeatMessageLoopDeath) {
- Timer timer(false, false);
+ OneShotTimer timer;
{
MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
@@ -640,8 +640,8 @@ TEST(TimerTest, NonRepeatMessageLoopDeath) {
TEST(TimerTest, RetainRepeatIsRunning) {
MessageLoop loop;
- Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
- true);
+ RepeatingTimer timer(FROM_HERE, TimeDelta::FromDays(1),
+ Bind(&TimerTestCallback));
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -653,8 +653,8 @@ TEST(TimerTest, RetainRepeatIsRunning) {
TEST(TimerTest, RetainNonRepeatIsRunning) {
MessageLoop loop;
- Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
- false);
+ RetainingOneShotTimer timer(FROM_HERE, TimeDelta::FromDays(1),
+ Bind(&TimerTestCallback));
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -692,7 +692,7 @@ TEST(TimerTest, ContinuationStopStart) {
{
ClearAllCallbackHappened();
MessageLoop loop;
- Timer timer(false, false);
+ OneShotTimer timer;
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
Bind(&SetCallbackHappened1));
timer.Stop();
@@ -708,7 +708,7 @@ TEST(TimerTest, ContinuationReset) {
{
ClearAllCallbackHappened();
MessageLoop loop;
- Timer timer(false, false);
+ OneShotTimer timer;
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
Bind(&SetCallbackHappened1));
timer.Reset();
diff --git a/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc b/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc
deleted file mode 100644
index 71c3d97f544..00000000000
--- a/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_heap_dump_writer.h"
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <iterator>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include "base/format_macros.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/strings/stringprintf.h"
-#include "base/trace_event/heap_profiler_serialization_state.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/trace_config.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_argument.h"
-#include "base/trace_event/trace_log.h"
-
-// Most of what the |HeapDumpWriter| does is aggregating detailed information
-// about the heap and deciding what to dump. The Input to this process is a list
-// of |AllocationContext|s and size pairs.
-//
-// The pairs are grouped into |Bucket|s. A bucket is a group of (context, size)
-// pairs where the properties of the contexts share a prefix. (Type name is
-// considered a list of length one here.) First all pairs are put into one
-// bucket that represents the entire heap. Then this bucket is recursively
-// broken down into smaller buckets. Each bucket keeps track of whether further
-// breakdown is possible.
-
-namespace base {
-namespace trace_event {
-namespace internal {
-namespace {
-
-// Denotes a property of |AllocationContext| to break down by.
-enum class BreakDownMode { kByBacktrace, kByTypeName };
-
-// A group of bytes for which the context shares a prefix.
-struct Bucket {
- Bucket()
- : size(0),
- count(0),
- backtrace_cursor(0),
- is_broken_down_by_type_name(false) {}
-
- std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
- metrics_by_context;
-
- // The sum of the sizes of |metrics_by_context|.
- size_t size;
-
- // The sum of number of allocations of |metrics_by_context|.
- size_t count;
-
- // The index of the stack frame that has not yet been broken down by. For all
- // elements in this bucket, the stack frames 0 up to (but not including) the
- // cursor, must be equal.
- size_t backtrace_cursor;
-
- // When true, the type name for all elements in this bucket must be equal.
- bool is_broken_down_by_type_name;
-};
-
-// Comparison operator to order buckets by their size.
-bool operator<(const Bucket& lhs, const Bucket& rhs) {
- return lhs.size < rhs.size;
-}
-
-// Groups the allocations in the bucket by |break_by|. The buckets in the
-// returned list will have |backtrace_cursor| advanced or
-// |is_broken_down_by_type_name| set depending on the property to group by.
-std::vector<Bucket> GetSubbuckets(const Bucket& bucket,
- BreakDownMode break_by) {
- std::unordered_map<const void*, Bucket> breakdown;
-
- if (break_by == BreakDownMode::kByBacktrace) {
- for (const auto& context_and_metrics : bucket.metrics_by_context) {
- const Backtrace& backtrace = context_and_metrics.first->backtrace;
- const StackFrame* begin = std::begin(backtrace.frames);
- const StackFrame* end = begin + backtrace.frame_count;
- const StackFrame* cursor = begin + bucket.backtrace_cursor;
-
- DCHECK_LE(cursor, end);
-
- if (cursor != end) {
- Bucket& subbucket = breakdown[cursor->value];
- subbucket.size += context_and_metrics.second.size;
- subbucket.count += context_and_metrics.second.count;
- subbucket.metrics_by_context.push_back(context_and_metrics);
- subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
- subbucket.is_broken_down_by_type_name =
- bucket.is_broken_down_by_type_name;
- DCHECK_GT(subbucket.size, 0u);
- DCHECK_GT(subbucket.count, 0u);
- }
- }
- } else if (break_by == BreakDownMode::kByTypeName) {
- if (!bucket.is_broken_down_by_type_name) {
- for (const auto& context_and_metrics : bucket.metrics_by_context) {
- const AllocationContext* context = context_and_metrics.first;
- Bucket& subbucket = breakdown[context->type_name];
- subbucket.size += context_and_metrics.second.size;
- subbucket.count += context_and_metrics.second.count;
- subbucket.metrics_by_context.push_back(context_and_metrics);
- subbucket.backtrace_cursor = bucket.backtrace_cursor;
- subbucket.is_broken_down_by_type_name = true;
- DCHECK_GT(subbucket.size, 0u);
- DCHECK_GT(subbucket.count, 0u);
- }
- }
- }
-
- std::vector<Bucket> buckets;
- buckets.reserve(breakdown.size());
- for (auto key_bucket : breakdown)
- buckets.push_back(key_bucket.second);
-
- return buckets;
-}
-
-// Breaks down the bucket by |break_by|. Returns only buckets that contribute
-// more than |min_size_bytes| to the total size. The long tail is omitted.
-std::vector<Bucket> BreakDownBy(const Bucket& bucket,
- BreakDownMode break_by,
- size_t min_size_bytes) {
- std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by);
-
- // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
- // so its front contains the largest bucket. Buckets should be iterated
- // ordered by size, but sorting the vector is overkill because the long tail
- // of small buckets will be discarded. By using a max-heap, the optimal case
- // where all but the first bucket are discarded is O(n). The worst case where
- // no bucket is discarded is doing a heap sort, which is O(n log n).
- std::make_heap(buckets.begin(), buckets.end());
-
- // Keep including buckets until adding one would increase the number of
- // bytes accounted for by |min_size_bytes|. The large buckets end up in
- // [it, end()), [begin(), it) is the part that contains the max-heap
- // of small buckets.
- std::vector<Bucket>::iterator it;
- for (it = buckets.end(); it != buckets.begin(); --it) {
- if (buckets.front().size < min_size_bytes)
- break;
-
- // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
- // [begin, it - 1). This puts the next largest bucket at |buckets.front()|.
- std::pop_heap(buckets.begin(), it);
- }
-
- // At this point, |buckets| looks like this (numbers are bucket sizes):
- //
- // <-- max-heap of small buckets --->
- // <-- large buckets by ascending size -->
- // [ 19 | 11 | 13 | 7 | 2 | 5 | ... | 83 | 89 | 97 ]
- // ^ ^ ^
- // | | |
- // begin() it end()
-
- // Discard the long tail of buckets that contribute less than a percent.
- buckets.erase(buckets.begin(), it);
-
- return buckets;
-}
-
-} // namespace
-
-bool operator<(Entry lhs, Entry rhs) {
- // There is no need to compare |size|. If the backtrace and type name are
- // equal then the sizes must be equal as well.
- return std::tie(lhs.stack_frame_id, lhs.type_id) <
- std::tie(rhs.stack_frame_id, rhs.type_id);
-}
-
-HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
- TypeNameDeduplicator* type_name_deduplicator,
- uint32_t breakdown_threshold_bytes)
- : stack_frame_deduplicator_(stack_frame_deduplicator),
- type_name_deduplicator_(type_name_deduplicator),
- breakdown_threshold_bytes_(breakdown_threshold_bytes) {}
-
-HeapDumpWriter::~HeapDumpWriter() = default;
-
-bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
- // The contexts in the bucket are all different, but the [begin, cursor) range
- // is equal for all contexts in the bucket, and the type names are the same if
- // |is_broken_down_by_type_name| is set.
- DCHECK(!bucket.metrics_by_context.empty());
-
- const AllocationContext* context = bucket.metrics_by_context.front().first;
-
- const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
- const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
- DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
-
- Entry entry;
- entry.stack_frame_id =
- stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
-
- // Deduplicate the type name, or use ID -1 if type name is not set.
- entry.type_id = bucket.is_broken_down_by_type_name
- ? type_name_deduplicator_->Insert(context->type_name)
- : -1;
-
- entry.size = bucket.size;
- entry.count = bucket.count;
-
- auto position_and_inserted = entries_.insert(entry);
- return position_and_inserted.second;
-}
-
-void HeapDumpWriter::BreakDown(const Bucket& bucket) {
- auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace,
- breakdown_threshold_bytes_);
- auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName,
- breakdown_threshold_bytes_);
-
- // Insert entries for the buckets. If a bucket was not present before, it has
- // not been broken down before, so recursively continue breaking down in that
- // case. There might be multiple routes to the same entry (first break down
- // by type name, then by backtrace, or first by backtrace and then by type),
- // so a set is used to avoid dumping and breaking down entries more than once.
-
- for (const Bucket& subbucket : by_backtrace)
- if (AddEntryForBucket(subbucket))
- BreakDown(subbucket);
-
- for (const Bucket& subbucket : by_type_name)
- if (AddEntryForBucket(subbucket))
- BreakDown(subbucket);
-}
-
-const std::set<Entry>& HeapDumpWriter::Summarize(
- const std::unordered_map<AllocationContext, AllocationMetrics>&
- metrics_by_context) {
- // Start with one bucket that represents the entire heap. Iterate by
- // reference, because the allocation contexts are going to point to allocation
- // contexts stored in |metrics_by_context|.
- Bucket root_bucket;
- for (const auto& context_and_metrics : metrics_by_context) {
- DCHECK_GT(context_and_metrics.second.size, 0u);
- DCHECK_GT(context_and_metrics.second.count, 0u);
- const AllocationContext* context = &context_and_metrics.first;
- root_bucket.metrics_by_context.push_back(
- std::make_pair(context, context_and_metrics.second));
- root_bucket.size += context_and_metrics.second.size;
- root_bucket.count += context_and_metrics.second.count;
- }
-
- AddEntryForBucket(root_bucket);
-
- // Recursively break down the heap and fill |entries_| with entries to dump.
- BreakDown(root_bucket);
-
- return entries_;
-}
-
-std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
- std::string buffer;
- std::unique_ptr<TracedValue> traced_value(new TracedValue);
-
- traced_value->BeginArray("entries");
-
- for (const Entry& entry : entries) {
- traced_value->BeginDictionary();
-
- // Format size as hexadecimal string into |buffer|.
- SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
- traced_value->SetString("size", buffer);
-
- SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
- traced_value->SetString("count", buffer);
-
- if (entry.stack_frame_id == -1) {
- // An empty backtrace (which will have ID -1) is represented by the empty
- // string, because there is no leaf frame to reference in |stackFrames|.
- traced_value->SetString("bt", "");
- } else {
- // Format index of the leaf frame as a string, because |stackFrames| is a
- // dictionary, not an array.
- SStringPrintf(&buffer, "%i", entry.stack_frame_id);
- traced_value->SetString("bt", buffer);
- }
-
- // Type ID -1 (cumulative size for all types) is represented by the absence
- // of the "type" key in the dictionary.
- if (entry.type_id != -1) {
- // Format the type ID as a string.
- SStringPrintf(&buffer, "%i", entry.type_id);
- traced_value->SetString("type", buffer);
- }
-
- traced_value->EndDictionary();
- }
-
- traced_value->EndArray(); // "entries"
- return traced_value;
-}
-
-} // namespace internal
-
-std::unique_ptr<TracedValue> ExportHeapDump(
- const std::unordered_map<AllocationContext, AllocationMetrics>&
- metrics_by_context,
- const HeapProfilerSerializationState& heap_profiler_serialization_state) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"), "ExportHeapDump");
- internal::HeapDumpWriter writer(
- heap_profiler_serialization_state.stack_frame_deduplicator(),
- heap_profiler_serialization_state.type_name_deduplicator(),
- heap_profiler_serialization_state
- .heap_profiler_breakdown_threshold_bytes());
- return Serialize(writer.Summarize(metrics_by_context));
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_heap_dump_writer.h b/chromium/base/trace_event/heap_profiler_heap_dump_writer.h
deleted file mode 100644
index 3366c286f52..00000000000
--- a/chromium/base/trace_event/heap_profiler_heap_dump_writer.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
-#define BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
-
-#include <stddef.h>
-
-#include <memory>
-#include <set>
-#include <unordered_map>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/trace_event/heap_profiler_allocation_context.h"
-
-namespace base {
-namespace trace_event {
-
-class HeapProfilerSerializationState;
-class StackFrameDeduplicator;
-class TracedValue;
-class TypeNameDeduplicator;
-
-// Aggregates |metrics_by_context|, recursively breaks down the heap, and
-// returns a traced value with an "entries" array that can be dumped in the
-// trace log, following the format described in https://goo.gl/KY7zVE. The
-// number of entries is kept reasonable because long tails are not included.
-BASE_EXPORT std::unique_ptr<TracedValue> ExportHeapDump(
- const std::unordered_map<AllocationContext, AllocationMetrics>&
- metrics_by_context,
- const HeapProfilerSerializationState& heap_profiler_serialization_state);
-
-namespace internal {
-
-namespace {
-struct Bucket;
-}
-
-// An entry in the "entries" array as described in https://goo.gl/KY7zVE.
-struct BASE_EXPORT Entry {
- size_t size;
- size_t count;
-
- // References a backtrace in the stack frame deduplicator. -1 means empty
- // backtrace (the root of the tree).
- int stack_frame_id;
-
- // References a type name in the type name deduplicator. -1 indicates that
- // the size is the cumulative size for all types (the root of the tree).
- int type_id;
-};
-
-// Comparison operator to enable putting |Entry| in a |std::set|.
-BASE_EXPORT bool operator<(Entry lhs, Entry rhs);
-
-// Serializes entries to an "entries" array in a traced value.
-BASE_EXPORT std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& dump);
-
-// Helper class to dump a snapshot of an |AllocationRegister| or other heap
-// bookkeeping structure into a |TracedValue|. This class is intended to be
-// used as a one-shot local instance on the stack.
-class BASE_EXPORT HeapDumpWriter {
- public:
- // The |stack_frame_deduplicator| and |type_name_deduplicator| are not owned.
- // The heap dump writer assumes exclusive access to them during the lifetime
- // of the dump writer. The heap dumps are broken down for allocations bigger
- // than |breakdown_threshold_bytes|.
- HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
- TypeNameDeduplicator* type_name_deduplicator,
- uint32_t breakdown_threshold_bytes);
-
- ~HeapDumpWriter();
-
- // Aggregates allocations to compute the total size of the heap, then breaks
- // down the heap recursively. This produces the values that should be dumped
- // in the "entries" array. The number of entries is kept reasonable because
- // long tails are not included. Use |Serialize| to convert to a traced value.
- const std::set<Entry>& Summarize(
- const std::unordered_map<AllocationContext, AllocationMetrics>&
- metrics_by_context);
-
- private:
- // Inserts an |Entry| for |Bucket| into |entries_|. Returns false if the
- // entry was present before, true if it was not.
- bool AddEntryForBucket(const Bucket& bucket);
-
- // Recursively breaks down a bucket into smaller buckets and adds entries for
- // the buckets worth dumping to |entries_|.
- void BreakDown(const Bucket& bucket);
-
- // The collection of entries that is filled by |Summarize|.
- std::set<Entry> entries_;
-
- // Helper for generating the |stackFrames| dictionary. Not owned, must outlive
- // this heap dump writer instance.
- StackFrameDeduplicator* const stack_frame_deduplicator_;
-
- // Helper for converting type names to IDs. Not owned, must outlive this heap
- // dump writer instance.
- TypeNameDeduplicator* const type_name_deduplicator_;
-
- // Minimum size of an allocation for which an allocation bucket will be
- // broken down with children.
- uint32_t breakdown_threshold_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapDumpWriter);
-};
-
-} // namespace internal
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
diff --git a/chromium/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc b/chromium/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc
deleted file mode 100644
index 93e8feea75a..00000000000
--- a/chromium/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_heap_dump_writer.h"
-
-#include <stddef.h>
-
-#include <memory>
-#include <set>
-#include <string>
-
-#include "base/json/json_reader.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/trace_event/heap_profiler_allocation_context.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/trace_event_argument.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-using base::trace_event::StackFrame;
-
-// Define all strings once, because the deduplicator requires pointer equality,
-// and string interning is unreliable.
-StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
-StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
-StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
-StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
-StackFrame kGetBitmap = StackFrame::FromTraceEventName("GetBitmap");
-
-const char kInt[] = "int";
-const char kBool[] = "bool";
-const char kString[] = "string";
-
-} // namespace
-
-namespace base {
-namespace trace_event {
-namespace internal {
-
-std::unique_ptr<const Value> WriteAndReadBack(const std::set<Entry>& entries) {
- std::unique_ptr<TracedValue> traced_value = Serialize(entries);
- std::string json;
- traced_value->AppendAsTraceFormat(&json);
- return JSONReader::Read(json);
-}
-
-std::unique_ptr<const DictionaryValue> WriteAndReadBackEntry(Entry entry) {
- std::set<Entry> input_entries;
- input_entries.insert(entry);
-
- std::unique_ptr<const Value> json_dict = WriteAndReadBack(input_entries);
-
- // Note: Ideally these should use |ASSERT_TRUE| instead of |EXPECT_TRUE|, but
- // |ASSERT_TRUE| can only be used in void functions.
- const DictionaryValue* dictionary;
- EXPECT_TRUE(json_dict->GetAsDictionary(&dictionary));
-
- const ListValue* json_entries;
- EXPECT_TRUE(dictionary->GetList("entries", &json_entries));
-
- const DictionaryValue* json_entry;
- EXPECT_TRUE(json_entries->GetDictionary(0, &json_entry));
-
- return json_entry->CreateDeepCopy();
-}
-
-// Given a desired stack frame ID and type ID, looks up the entry in the set and
-// asserts that it is present and has the expected size and count.
-void AssertSizeAndCountEq(const std::set<Entry>& entries,
- int stack_frame_id,
- int type_id,
- const AllocationMetrics& expected) {
- // The comparison operator for |Entry| does not take size into account, so by
- // setting only stack frame ID and type ID, the real entry can be found.
- Entry entry;
- entry.stack_frame_id = stack_frame_id;
- entry.type_id = type_id;
- auto it = entries.find(entry);
-
- ASSERT_NE(entries.end(), it) << "No entry found for sf = " << stack_frame_id
- << ", type = " << type_id << ".";
- ASSERT_EQ(expected.size, it->size) << "Wrong size for sf = " << stack_frame_id
- << ", type = " << type_id << ".";
- ASSERT_EQ(expected.count, it->count)
- << "Wrong count for sf = " << stack_frame_id << ", type = " << type_id
- << ".";
-}
-
-// Given a desired stack frame ID and type ID, asserts that no entry was dumped
- // for that particular combination of stack frame and type.
-void AssertNotDumped(const std::set<Entry>& entries,
- int stack_frame_id,
- int type_id) {
- // The comparison operator for |Entry| does not take size into account, so by
- // setting only stack frame ID and type ID, the real entry can be found.
- Entry entry;
- entry.stack_frame_id = stack_frame_id;
- entry.type_id = type_id;
- auto it = entries.find(entry);
- ASSERT_EQ(entries.end(), it)
- << "Entry should not be present for sf = " << stack_frame_id
- << ", type = " << type_id << ".";
-}
-
-TEST(HeapDumpWriterTest, BacktraceIndex) {
- Entry entry;
- entry.stack_frame_id = -1; // -1 means empty backtrace.
- entry.type_id = 0;
- entry.size = 1;
- entry.count = 1;
-
- std::unique_ptr<const DictionaryValue> json_entry =
- WriteAndReadBackEntry(entry);
-
- // For an empty backtrace, the "bt" key cannot reference a stack frame.
- // Instead it should be set to the empty string.
- std::string backtrace_index;
- ASSERT_TRUE(json_entry->GetString("bt", &backtrace_index));
- ASSERT_EQ("", backtrace_index);
-
- // Also verify that a non-negative backtrace index is dumped properly.
- entry.stack_frame_id = 2;
- json_entry = WriteAndReadBackEntry(entry);
- ASSERT_TRUE(json_entry->GetString("bt", &backtrace_index));
- ASSERT_EQ("2", backtrace_index);
-}
-
-TEST(HeapDumpWriterTest, TypeId) {
- Entry entry;
- entry.type_id = -1; // -1 means sum over all types.
- entry.stack_frame_id = 0;
- entry.size = 1;
- entry.count = 1;
-
- std::unique_ptr<const DictionaryValue> json_entry =
- WriteAndReadBackEntry(entry);
-
- // Entries for the cumulative size of all types should not have the "type"
- // key set.
- ASSERT_FALSE(json_entry->HasKey("type"));
-
- // Also verify that a non-negative type ID is dumped properly.
- entry.type_id = 2;
- json_entry = WriteAndReadBackEntry(entry);
- std::string type_id;
- ASSERT_TRUE(json_entry->GetString("type", &type_id));
- ASSERT_EQ("2", type_id);
-}
-
-TEST(HeapDumpWriterTest, SizeAndCountAreHexadecimal) {
- // Take a number between 2^63 and 2^64 (or between 2^31 and 2^32 if |size_t|
- // is not 64 bits).
- const size_t large_value =
- sizeof(size_t) == 8 ? 0xffffffffffffffc5 : 0xffffff9d;
- const char* large_value_str =
- sizeof(size_t) == 8 ? "ffffffffffffffc5" : "ffffff9d";
- Entry entry;
- entry.type_id = 0;
- entry.stack_frame_id = 0;
- entry.size = large_value;
- entry.count = large_value;
-
- std::unique_ptr<const DictionaryValue> json_entry =
- WriteAndReadBackEntry(entry);
-
- std::string size;
- ASSERT_TRUE(json_entry->GetString("size", &size));
- ASSERT_EQ(large_value_str, size);
-
- std::string count;
- ASSERT_TRUE(json_entry->GetString("count", &count));
- ASSERT_EQ(large_value_str, count);
-}
-
-TEST(HeapDumpWriterTest, BacktraceTypeNameTable) {
- std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
-
- AllocationContext ctx;
- ctx.backtrace.frames[0] = kBrowserMain;
- ctx.backtrace.frames[1] = kCreateWidget;
- ctx.backtrace.frame_count = 2;
- ctx.type_name = kInt;
-
- // 10 bytes with context { type: int, bt: [BrowserMain, CreateWidget] }.
- metrics_by_context[ctx] = {10, 5};
-
- ctx.type_name = kBool;
-
- // 18 bytes with context { type: bool, bt: [BrowserMain, CreateWidget] }.
- metrics_by_context[ctx] = {18, 18};
-
- ctx.backtrace.frames[0] = kRendererMain;
- ctx.backtrace.frames[1] = kInitialize;
- ctx.backtrace.frame_count = 2;
-
- // 30 bytes with context { type: bool, bt: [RendererMain, Initialize] }.
- metrics_by_context[ctx] = {30, 30};
-
- ctx.type_name = kString;
-
- // 19 bytes with context { type: string, bt: [RendererMain, Initialize] }.
- metrics_by_context[ctx] = {19, 4};
-
- // At this point the heap looks like this:
- //
- // | | CrWidget <- BrMain | Init <- RenMain | Sum |
- // +--------+--------------------+-----------------+-------------+
- // | | size count | size count | size count |
- // | int | 10 5 | 0 0 | 10 5 |
- // | bool | 18 18 | 30 30 | 48 48 |
- // | string | 0 0 | 19 4 | 19 4 |
- // +--------+--------------------+-----------------+-------------+
- // | Sum | 28 23 | 49 34 | 77 57 |
-
- auto stack_frame_deduplicator = WrapUnique(new StackFrameDeduplicator);
- auto type_name_deduplicator = WrapUnique(new TypeNameDeduplicator);
- HeapDumpWriter writer(stack_frame_deduplicator.get(),
- type_name_deduplicator.get(), 10u);
- const std::set<Entry>& dump = writer.Summarize(metrics_by_context);
-
- // Get the indices of the backtraces and types by adding them again to the
- // deduplicator. Because they were added before, the same number is returned.
- StackFrame bt0[] = {kRendererMain, kInitialize};
- StackFrame bt1[] = {kBrowserMain, kCreateWidget};
- int bt_renderer_main = stack_frame_deduplicator->Insert(bt0, bt0 + 1);
- int bt_browser_main = stack_frame_deduplicator->Insert(bt1, bt1 + 1);
- int bt_renderer_main_initialize =
- stack_frame_deduplicator->Insert(bt0, bt0 + 2);
- int bt_browser_main_create_widget =
- stack_frame_deduplicator->Insert(bt1, bt1 + 2);
- int type_id_int = type_name_deduplicator->Insert(kInt);
- int type_id_bool = type_name_deduplicator->Insert(kBool);
- int type_id_string = type_name_deduplicator->Insert(kString);
-
- // Full heap should have size 77.
- AssertSizeAndCountEq(dump, -1, -1, {77, 57});
-
- // 49 bytes in 34 chunks were allocated in RendererMain and children. Also
- // check the type breakdown.
- AssertSizeAndCountEq(dump, bt_renderer_main, -1, {49, 34});
- AssertSizeAndCountEq(dump, bt_renderer_main, type_id_bool, {30, 30});
- AssertSizeAndCountEq(dump, bt_renderer_main, type_id_string, {19, 4});
-
- // 28 bytes in 23 chunks were allocated in BrowserMain and children. Also
- // check the type breakdown.
- AssertSizeAndCountEq(dump, bt_browser_main, -1, {28, 23});
- AssertSizeAndCountEq(dump, bt_browser_main, type_id_int, {10, 5});
- AssertSizeAndCountEq(dump, bt_browser_main, type_id_bool, {18, 18});
-
- // In this test all bytes are allocated in leaf nodes, so check again one
- // level deeper.
- AssertSizeAndCountEq(dump, bt_renderer_main_initialize, -1, {49, 34});
- AssertSizeAndCountEq(dump, bt_renderer_main_initialize, type_id_bool,
- {30, 30});
- AssertSizeAndCountEq(dump, bt_renderer_main_initialize, type_id_string,
- {19, 4});
- AssertSizeAndCountEq(dump, bt_browser_main_create_widget, -1, {28, 23});
- AssertSizeAndCountEq(dump, bt_browser_main_create_widget, type_id_int,
- {10, 5});
- AssertSizeAndCountEq(dump, bt_browser_main_create_widget, type_id_bool,
- {18, 18});
-
- // The type breakdown of the entire heap should have been dumped as well.
- AssertSizeAndCountEq(dump, -1, type_id_int, {10, 5});
- AssertSizeAndCountEq(dump, -1, type_id_bool, {48, 48});
- AssertSizeAndCountEq(dump, -1, type_id_string, {19, 4});
-}
-
-TEST(HeapDumpWriterTest, InsignificantValuesNotDumped) {
- std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
-
- AllocationContext ctx;
- ctx.backtrace.frames[0] = kBrowserMain;
- ctx.backtrace.frames[1] = kCreateWidget;
- ctx.backtrace.frame_count = 2;
-
- // 0.5 KiB and 1 chunk in BrowserMain -> CreateWidget itself.
- metrics_by_context[ctx] = {512, 1};
-
- // 1 MiB and 1 chunk in BrowserMain -> CreateWidget -> GetBitmap.
- ctx.backtrace.frames[2] = kGetBitmap;
- ctx.backtrace.frame_count = 3;
- metrics_by_context[ctx] = {1024 * 1024, 1};
-
- // 400B and 1 chunk in BrowserMain -> CreateWidget -> Initialize.
- ctx.backtrace.frames[2] = kInitialize;
- ctx.backtrace.frame_count = 3;
- metrics_by_context[ctx] = {400, 1};
-
- auto stack_frame_deduplicator = WrapUnique(new StackFrameDeduplicator);
- auto type_name_deduplicator = WrapUnique(new TypeNameDeduplicator);
- HeapDumpWriter writer(stack_frame_deduplicator.get(),
- type_name_deduplicator.get(), 512u);
- const std::set<Entry>& dump = writer.Summarize(metrics_by_context);
-
- // Get the indices of the backtraces and types by adding them again to the
- // deduplicator. Because they were added before, the same number is returned.
- StackFrame bt0[] = {kBrowserMain, kCreateWidget, kGetBitmap};
- StackFrame bt1[] = {kBrowserMain, kCreateWidget, kInitialize};
- int bt_browser_main = stack_frame_deduplicator->Insert(bt0, bt0 + 1);
- int bt_create_widget = stack_frame_deduplicator->Insert(bt0, bt0 + 2);
- int bt_get_bitmap = stack_frame_deduplicator->Insert(bt0, bt0 + 3);
- int bt_initialize = stack_frame_deduplicator->Insert(bt1, bt1 + 3);
-
- // Full heap should have size of 1 MiB + .9 KiB and 3 chunks.
- AssertSizeAndCountEq(dump, -1, -1 /* No type specified */,
- {1024 * 1024 + 512 + 400, 3});
-
- // |GetBitmap| allocated 1 MiB and 1 chunk.
- AssertSizeAndCountEq(dump, bt_get_bitmap, -1, {1024 * 1024, 1});
-
- // Because |GetBitmap| was dumped, all of its parent nodes should have been
- // dumped too. |CreateWidget| has 1 MiB in |GetBitmap|, 400 bytes in
- // |Initialize|, and 512 bytes of its own, each in 1 chunk.
- AssertSizeAndCountEq(dump, bt_create_widget, -1,
- {1024 * 1024 + 400 + 512, 3});
- AssertSizeAndCountEq(dump, bt_browser_main, -1, {1024 * 1024 + 400 + 512, 3});
-
- // Initialize was not significant, so it should not have been dumped.
- AssertNotDumped(dump, bt_initialize, -1);
-}
-
-} // namespace internal
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_serialization_state.cc b/chromium/base/trace_event/heap_profiler_serialization_state.cc
deleted file mode 100644
index b1866e72f1c..00000000000
--- a/chromium/base/trace_event/heap_profiler_serialization_state.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_serialization_state.h"
-
-namespace base {
-namespace trace_event {
-
-HeapProfilerSerializationState::HeapProfilerSerializationState()
- : heap_profiler_breakdown_threshold_bytes_(0) {}
-HeapProfilerSerializationState::~HeapProfilerSerializationState() = default;
-
-void HeapProfilerSerializationState::SetStackFrameDeduplicator(
- std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
- DCHECK(!stack_frame_deduplicator_);
- stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
-}
-
-void HeapProfilerSerializationState::SetTypeNameDeduplicator(
- std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator) {
- DCHECK(!type_name_deduplicator_);
- type_name_deduplicator_ = std::move(type_name_deduplicator);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_serialization_state.h b/chromium/base/trace_event/heap_profiler_serialization_state.h
deleted file mode 100644
index 53c56871f8d..00000000000
--- a/chromium/base/trace_event/heap_profiler_serialization_state.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
-#define BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
-
-#include <memory>
-#include <set>
-
-#include "base/base_export.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/memory_dump_request_args.h"
-
-namespace base {
-namespace trace_event {
-
-// Container for state variables that should be shared across all the memory
-// dumps in a tracing session.
-class BASE_EXPORT HeapProfilerSerializationState
- : public RefCountedThreadSafe<HeapProfilerSerializationState> {
- public:
- HeapProfilerSerializationState();
-
- // Returns the stack frame deduplicator that should be used by memory dump
- // providers when doing a heap dump.
- StackFrameDeduplicator* stack_frame_deduplicator() const {
- return stack_frame_deduplicator_.get();
- }
-
- void SetStackFrameDeduplicator(
- std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
-
- // Returns the type name deduplicator that should be used by memory dump
- // providers when doing a heap dump.
- TypeNameDeduplicator* type_name_deduplicator() const {
- return type_name_deduplicator_.get();
- }
-
- void SetTypeNameDeduplicator(
- std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
-
- void SetAllowedDumpModes(
- std::set<MemoryDumpLevelOfDetail> allowed_dump_modes);
-
- bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) const;
-
- void set_heap_profiler_breakdown_threshold_bytes(uint32_t value) {
- heap_profiler_breakdown_threshold_bytes_ = value;
- }
-
- uint32_t heap_profiler_breakdown_threshold_bytes() const {
- return heap_profiler_breakdown_threshold_bytes_;
- }
-
- bool is_initialized() const {
- return stack_frame_deduplicator_ && type_name_deduplicator_ &&
- heap_profiler_breakdown_threshold_bytes_;
- }
-
- private:
- friend class RefCountedThreadSafe<HeapProfilerSerializationState>;
- ~HeapProfilerSerializationState();
-
- // Deduplicates backtraces in heap dumps so they can be written once when the
- // trace is finalized.
- std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
-
- // Deduplicates type names in heap dumps so they can be written once when the
- // trace is finalized.
- std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
-
- uint32_t heap_profiler_breakdown_threshold_bytes_;
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
deleted file mode 100644
index c05cd0a25e3..00000000000
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-
-#include <inttypes.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <string>
-#include <utility>
-
-#include "base/hash.h"
-#include "base/strings/stringprintf.h"
-#include "base/trace_event/memory_usage_estimator.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_argument.h"
-#include "base/trace_event/trace_event_memory_overhead.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-// Dumb hash function that nevertheless works surprisingly well and
-// produces ~0 collisions on real backtraces.
-size_t HashBacktrace(const StackFrame* begin, const StackFrame* end) {
- size_t hash = 0;
- for (; begin != end; begin++) {
- hash += reinterpret_cast<uintptr_t>(begin->value);
- }
- return hash;
-}
-
-} // namespace
-
-StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
- int parent_frame_index)
- : frame(frame), parent_frame_index(parent_frame_index) {}
-StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
-StackFrameDeduplicator::FrameNode::~FrameNode() = default;
-
-size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
- return base::trace_event::EstimateMemoryUsage(children);
-}
-
-StackFrameDeduplicator::StackFrameDeduplicator() = default;
-StackFrameDeduplicator::~StackFrameDeduplicator() = default;
-
-bool StackFrameDeduplicator::Match(int frame_index,
- const StackFrame* begin_frame,
- const StackFrame* end_frame) const {
- // |frame_index| identifies the bottom frame, i.e. we need to walk
- // the backtrace backwards.
- const StackFrame* current_frame = end_frame - 1;
- for (; current_frame >= begin_frame; --current_frame) {
- const FrameNode& node = frames_[frame_index];
- if (node.frame != *current_frame) {
- break;
- }
-
- frame_index = node.parent_frame_index;
- if (frame_index == FrameNode::kInvalidFrameIndex) {
- if (current_frame == begin_frame) {
- // We're at the top node and we matched all backtrace frames,
- // i.e. we successfully matched the backtrace.
- return true;
- }
- break;
- }
- }
-
- return false;
-}
-
-int StackFrameDeduplicator::Insert(const StackFrame* begin_frame,
- const StackFrame* end_frame) {
- if (begin_frame == end_frame) {
- return FrameNode::kInvalidFrameIndex;
- }
-
- size_t backtrace_hash = HashBacktrace(begin_frame, end_frame);
-
- // Check if we know about this backtrace.
- auto backtrace_it = backtrace_lookup_table_.find(backtrace_hash);
- if (backtrace_it != backtrace_lookup_table_.end()) {
- int backtrace_index = backtrace_it->second;
- if (Match(backtrace_index, begin_frame, end_frame)) {
- return backtrace_index;
- }
- }
-
- int frame_index = FrameNode::kInvalidFrameIndex;
- base::flat_map<StackFrame, int>* nodes = &roots_;
-
- // Loop through the frames, early out when a frame is null.
- for (const StackFrame* it = begin_frame; it != end_frame; it++) {
- StackFrame frame = *it;
-
- auto node = nodes->find(frame);
- if (node == nodes->end()) {
- // There is no tree node for this frame yet; create it. The parent node
- // is the node associated with the previous frame.
- FrameNode frame_node(frame, frame_index);
-
- // The new frame node will be appended, so its index is the current size
- // of the vector.
- frame_index = static_cast<int>(frames_.size());
-
- // Add the node to the trie so it will be found next time.
- nodes->insert(std::make_pair(frame, frame_index));
-
- // Append the node after modifying |nodes|, because the |frames_| vector
- // might need to resize, and this invalidates the |nodes| pointer.
- frames_.push_back(frame_node);
- } else {
- // A tree node for this frame exists. Look for the next one.
- frame_index = node->second;
- }
-
- nodes = &frames_[frame_index].children;
- }
-
- // Remember the backtrace.
- backtrace_lookup_table_[backtrace_hash] = frame_index;
-
- return frame_index;
-}
-
-void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
- "StackFrameDeduplicator::AppendAsTraceFormat");
- out->append("{"); // Begin the |stackFrames| dictionary.
-
- int i = 0;
- auto frame_node = begin();
- auto it_end = end();
- std::string stringify_buffer;
-
- while (frame_node != it_end) {
- // The |stackFrames| format is a dictionary, not an array, so the
- // keys are stringified indices. Write the index manually, then use
- // |TracedValue| to format the object. This is to avoid building the
- // entire dictionary as a |TracedValue| in memory.
- SStringPrintf(&stringify_buffer, "\"%d\":", i);
- out->append(stringify_buffer);
-
- std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
- const StackFrame& frame = frame_node->frame;
- switch (frame.type) {
- case StackFrame::Type::TRACE_EVENT_NAME:
- frame_node_value->SetString("name",
- static_cast<const char*>(frame.value));
- break;
- case StackFrame::Type::THREAD_NAME:
- SStringPrintf(&stringify_buffer,
- "[Thread: %s]",
- static_cast<const char*>(frame.value));
- frame_node_value->SetString("name", stringify_buffer);
- break;
- case StackFrame::Type::PROGRAM_COUNTER:
- SStringPrintf(&stringify_buffer,
- "pc:%" PRIxPTR,
- reinterpret_cast<uintptr_t>(frame.value));
- frame_node_value->SetString("name", stringify_buffer);
- break;
- }
- if (frame_node->parent_frame_index != FrameNode::kInvalidFrameIndex) {
- SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
- frame_node_value->SetString("parent", stringify_buffer);
- }
- frame_node_value->AppendAsTraceFormat(out);
-
- i++;
- frame_node++;
-
- if (frame_node != it_end)
- out->append(",");
- }
-
- out->append("}"); // End the |stackFrames| dictionary.
-}
-
-void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- size_t memory_usage = EstimateMemoryUsage(frames_) +
- EstimateMemoryUsage(roots_) +
- EstimateMemoryUsage(backtrace_lookup_table_);
- overhead->Add(TraceEventMemoryOverhead::kHeapProfilerStackFrameDeduplicator,
- sizeof(StackFrameDeduplicator) + memory_usage);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h
deleted file mode 100644
index ac8d895ca46..00000000000
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
-#define BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
-
-#include <string>
-#include <unordered_map>
-
-#include "base/base_export.h"
-#include "base/containers/circular_deque.h"
-#include "base/containers/flat_map.h"
-#include "base/macros.h"
-#include "base/trace_event/heap_profiler_allocation_context.h"
-#include "base/trace_event/trace_event_impl.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceEventMemoryOverhead;
-
-// A data structure that allows grouping a set of backtraces in a space-
-// efficient manner by creating a call tree and writing it as a set of (node,
-// parent) pairs. The tree nodes reference both parent and children. The parent
-// is referenced by index into |frames_|. The children are referenced via a map
- // of |StackFrame|s to index into |frames_|. So there is a trie for bottom-up
-// lookup of a backtrace for deduplication, and a tree for compact storage in
-// the trace log.
-class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
- public:
- // A node in the call tree.
- struct FrameNode {
- FrameNode(StackFrame frame, int parent_frame_index);
- FrameNode(const FrameNode& other);
- ~FrameNode();
-
- size_t EstimateMemoryUsage() const;
-
- StackFrame frame;
-
- // The index of the parent stack frame in |frames_|, or kInvalidFrameIndex
- // if there is no parent frame (when it is at the bottom of the call stack).
- int parent_frame_index;
- constexpr static int kInvalidFrameIndex = -1;
-
- // Indices into |frames_| of frames called from the current frame.
- base::flat_map<StackFrame, int> children;
- };
-
- using ConstIterator = base::circular_deque<FrameNode>::const_iterator;
-
- StackFrameDeduplicator();
- ~StackFrameDeduplicator() override;
-
- // Inserts a backtrace where |beginFrame| is a pointer to the bottom frame
- // (e.g. main) and |endFrame| is a pointer past the top frame (most recently
- // called function), and returns the index of its leaf node in |frames_|.
- // Returns -1 if the backtrace is empty.
- int Insert(const StackFrame* beginFrame, const StackFrame* endFrame);
-
- // Iterators over the frame nodes in the call tree.
- ConstIterator begin() const { return frames_.begin(); }
- ConstIterator end() const { return frames_.end(); }
-
- // Writes the |stackFrames| dictionary as defined in https://goo.gl/GerkV8 to
- // the trace log.
- void AppendAsTraceFormat(std::string* out) const override;
-
- // Estimates memory overhead including |sizeof(StackFrameDeduplicator)|.
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
-
- private:
- // Checks that the existing backtrace identified by |frame_index| is equal
- // to the one identified by |begin_frame| and |end_frame|.
- bool Match(int frame_index,
- const StackFrame* begin_frame,
- const StackFrame* end_frame) const;
-
- base::flat_map<StackFrame, int> roots_;
- base::circular_deque<FrameNode> frames_;
-
- // {backtrace_hash -> frame_index} map for finding backtraces that are
- // already added. Backtraces themselves are not stored in the map; instead,
- // Match() is used on the found frame_index to detect collisions.
- std::unordered_map<size_t, int> backtrace_lookup_table_;
-
- DISALLOW_COPY_AND_ASSIGN(StackFrameDeduplicator);
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
deleted file mode 100644
index 194c7aad20b..00000000000
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-
-#include <iterator>
-#include <memory>
-
-#include "base/macros.h"
-#include "base/trace_event/heap_profiler_allocation_context.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-// Define all strings once, because the deduplicator requires pointer equality,
-// and string interning is unreliable.
-StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
-StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
-StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
-StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
-StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
-
-TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
- StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
-
- // The call tree should look like this (index in brackets).
- //
- // BrowserMain [0]
- // CreateWidget [1]
- // malloc [2]
-
- std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
- ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
-
- auto iter = dedup->begin();
- ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
- ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
-
- ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
- ASSERT_EQ(0, (iter + 1)->parent_frame_index);
-
- ASSERT_EQ(kMalloc, (iter + 2)->frame);
- ASSERT_EQ(1, (iter + 2)->parent_frame_index);
-
- ASSERT_TRUE(iter + 3 == dedup->end());
-}
-
-TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
- StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
- StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
-
- // The deduplicator doesn't care about what's inside StackFrames,
- // and handles nullptr StackFrame values like any other.
- //
- // So the call tree should look like this (index in brackets).
- //
- // BrowserMain [0]
- // (null) [1]
- // malloc [2]
-
- std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
- ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
-
- auto iter = dedup->begin();
- ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
- ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
-
- ASSERT_EQ(null_frame, (iter + 1)->frame);
- ASSERT_EQ(0, (iter + 1)->parent_frame_index);
-
- ASSERT_EQ(kMalloc, (iter + 2)->frame);
- ASSERT_EQ(1, (iter + 2)->parent_frame_index);
-
- ASSERT_TRUE(iter + 3 == dedup->end());
-}
-
-// Test that there can be different call trees (there can be multiple bottom
-// frames). Also verify that frames with the same name but a different caller
-// are represented as distinct nodes.
-TEST(StackFrameDeduplicatorTest, MultipleRoots) {
- StackFrame bt0[] = {kBrowserMain, kCreateWidget};
- StackFrame bt1[] = {kRendererMain, kCreateWidget};
-
- // The call tree should look like this (index in brackets).
- //
- // BrowserMain [0]
- // CreateWidget [1]
- // RendererMain [2]
- // CreateWidget [3]
- //
- // Note that there will be two instances of CreateWidget,
- // with different parents.
-
- std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
- ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
- ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
-
- auto iter = dedup->begin();
- ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
- ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
-
- ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
- ASSERT_EQ(0, (iter + 1)->parent_frame_index);
-
- ASSERT_EQ(kRendererMain, (iter + 2)->frame);
- ASSERT_EQ(-1, (iter + 2)->parent_frame_index);
-
- ASSERT_EQ(kCreateWidget, (iter + 3)->frame);
- ASSERT_EQ(2, (iter + 3)->parent_frame_index);
-
- ASSERT_TRUE(iter + 4 == dedup->end());
-}
-
-TEST(StackFrameDeduplicatorTest, Deduplication) {
- StackFrame bt0[] = {kBrowserMain, kCreateWidget};
- StackFrame bt1[] = {kBrowserMain, kInitialize};
-
- // The call tree should look like this (index in brackets).
- //
- // BrowserMain [0]
- // CreateWidget [1]
- // Initialize [2]
- //
- // Note that BrowserMain will be re-used.
-
- std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
- ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
- ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
-
- auto iter = dedup->begin();
- ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
- ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
-
- ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
- ASSERT_EQ(0, (iter + 1)->parent_frame_index);
-
- ASSERT_EQ(kInitialize, (iter + 2)->frame);
- ASSERT_EQ(0, (iter + 2)->parent_frame_index);
-
- ASSERT_TRUE(iter + 3 == dedup->end());
-
- // Inserting the same backtrace again should return the index of the existing
- // node.
- ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
- ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
- ASSERT_TRUE(dedup->begin() + 3 == dedup->end());
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
deleted file mode 100644
index 360f239bbd1..00000000000
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <string>
-#include <utility>
-
-#include "base/json/string_escape.h"
-#include "base/strings/string_split.h"
-#include "base/strings/stringprintf.h"
-#include "base/trace_event/memory_usage_estimator.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_memory_overhead.h"
-
-namespace base {
-namespace trace_event {
-
-TypeNameDeduplicator::TypeNameDeduplicator() {
- // A null pointer has type ID 0 ("unknown type").
- type_ids_.insert(std::make_pair(nullptr, 0));
-}
-
-TypeNameDeduplicator::~TypeNameDeduplicator() = default;
-
-int TypeNameDeduplicator::Insert(const char* type_name) {
- auto result = type_ids_.insert(std::make_pair(type_name, 0));
- auto& elem = result.first;
- bool did_not_exist_before = result.second;
-
- if (did_not_exist_before) {
- // The type IDs are assigned sequentially and they are zero-based, so
- // |size() - 1| is the ID of the new element.
- elem->second = static_cast<int>(type_ids_.size() - 1);
- }
-
- return elem->second;
-}
-
-void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
- "TypeNameDeduplicator::AppendAsTraceFormat");
- out->append("{"); // Begin the type names dictionary.
-
- auto it = type_ids_.begin();
- std::string buffer;
-
- // Write the first entry manually; the null pointer must not be dereferenced.
- // (The first entry is the null pointer because a |std::map| is ordered.)
- it++;
- out->append("\"0\":\"[unknown]\"");
-
- for (; it != type_ids_.end(); it++) {
- // Type IDs in the trace are strings; write them as stringified keys of
- // a dictionary.
- SStringPrintf(&buffer, ",\"%d\":", it->second);
-
- // TODO(ssid): crbug.com/594803 the type name is misused for file name in
- // some cases.
- StringPiece type_info = it->first;
-
- // |EscapeJSONString| appends, it does not overwrite |buffer|.
- bool put_in_quotes = true;
- EscapeJSONString(type_info, put_in_quotes, &buffer);
- out->append(buffer);
- }
-
- out->append("}"); // End the type names dictionary.
-}
-
-void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- size_t memory_usage = EstimateMemoryUsage(type_ids_);
- overhead->Add(TraceEventMemoryOverhead::kHeapProfilerTypeNameDeduplicator,
- sizeof(TypeNameDeduplicator) + memory_usage);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.h b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.h
deleted file mode 100644
index 2d26c73488e..00000000000
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
-#define BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
-
-#include <map>
-#include <string>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/trace_event/trace_event_impl.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceEventMemoryOverhead;
-
-// Data structure that assigns a unique numeric ID to |const char*|s.
-class BASE_EXPORT TypeNameDeduplicator : public ConvertableToTraceFormat {
- public:
- TypeNameDeduplicator();
- ~TypeNameDeduplicator() override;
-
- // Inserts a type name and returns its ID.
- int Insert(const char* type_name);
-
- // Writes the type ID -> type name mapping to the trace log.
- void AppendAsTraceFormat(std::string* out) const override;
-
- // Estimates memory overhead including |sizeof(TypeNameDeduplicator)|.
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
-
- private:
- // Map from type name to type ID.
- std::map<const char*, int> type_ids_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeNameDeduplicator);
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
deleted file mode 100644
index f97808bfb07..00000000000
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <memory>
-#include <string>
-
-#include "base/json/json_reader.h"
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-// Define all strings once, because the deduplicator requires pointer equality,
-// and string interning is unreliable.
-const char kInt[] = "int";
-const char kBool[] = "bool";
-const char kString[] = "string";
-const char kNeedsEscape[] = "\"quotes\"";
-
-std::unique_ptr<Value> DumpAndReadBack(
- const TypeNameDeduplicator& deduplicator) {
- std::string json;
- deduplicator.AppendAsTraceFormat(&json);
- return JSONReader::Read(json);
-}
-
-// Inserts a single type name into a new TypeNameDeduplicator instance and
-// checks if the value gets inserted and the exported value for |type_name| is
-// the same as |expected_value|.
-void TestInsertTypeAndReadback(const char* type_name,
- const char* expected_value) {
- std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
- ASSERT_EQ(1, dedup->Insert(type_name));
-
- std::unique_ptr<Value> type_names = DumpAndReadBack(*dedup);
- ASSERT_NE(nullptr, type_names);
-
- const DictionaryValue* dictionary;
- ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
-
- // When the type name was inserted, it got ID 1. The exported key "1"
- // should be equal to |expected_value|.
- std::string value;
- ASSERT_TRUE(dictionary->GetString("1", &value));
- ASSERT_EQ(expected_value, value);
-}
-
-} // namespace
-
-TEST(TypeNameDeduplicatorTest, Deduplication) {
- // The type IDs should be like this:
- // 0: [unknown]
- // 1: int
- // 2: bool
- // 3: string
-
- std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
- ASSERT_EQ(1, dedup->Insert(kInt));
- ASSERT_EQ(2, dedup->Insert(kBool));
- ASSERT_EQ(3, dedup->Insert(kString));
-
- // Inserting again should return the same IDs.
- ASSERT_EQ(2, dedup->Insert(kBool));
- ASSERT_EQ(1, dedup->Insert(kInt));
- ASSERT_EQ(3, dedup->Insert(kString));
-
- // A null pointer should yield type ID 0.
- ASSERT_EQ(0, dedup->Insert(nullptr));
-}
-
-TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
- // Reading the JSON should not fail, because the type name should have been
- // escaped properly and the exported value should contain quotes.
- TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
index 9b9eb1792cf..4deaf839ec5 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -13,8 +13,7 @@ namespace trace_event {
TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
auto* jhdp = JavaHeapDumpProvider::GetInstance();
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, dump_args));
+ std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(dump_args));
jhdp->OnMemoryDump(dump_args, pmd.get());
}
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index b0b6e741a9b..78a545ff847 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -8,7 +8,6 @@
#include "base/format_macros.h"
#include "base/strings/stringprintf.h"
-#include "base/trace_event/heap_profiler_serialization_state.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
@@ -85,7 +84,7 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+ ProcessMemoryDump pmd(dump_args);
fmadp.OnMemoryDump(dump_args, &pmd);
@@ -120,12 +119,11 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
// Check that calling serialization routines doesn't cause a crash.
std::unique_ptr<TracedValue> traced_value(new TracedValue);
pmd.SerializeAllocatorDumpsInto(traced_value.get());
- pmd.SerializeHeapProfilerDumpsInto(traced_value.get());
}
TEST(MemoryAllocatorDumpTest, GetSize) {
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+ ProcessMemoryDump pmd(dump_args);
MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 1);
@@ -135,7 +133,7 @@ TEST(MemoryAllocatorDumpTest, GetSize) {
TEST(MemoryAllocatorDumpTest, ReadValues) {
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+ ProcessMemoryDump pmd(dump_args);
MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
dump->AddScalar("one", "byte", 1);
dump->AddString("one", "object", "one");
@@ -159,7 +157,7 @@ TEST(MemoryAllocatorDumpTest, MovingAnEntry) {
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
FakeMemoryAllocatorDumpProvider fmadp;
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+ ProcessMemoryDump pmd(dump_args);
pmd.CreateAllocatorDump("foo_allocator");
pmd.CreateAllocatorDump("bar_allocator/heap");
ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
@@ -169,7 +167,7 @@ TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
TEST(MemoryAllocatorDumpTest, ForbidStringsInBackgroundModeDeathTest) {
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::BACKGROUND};
- ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+ ProcessMemoryDump pmd(dump_args);
MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("malloc");
ASSERT_DEATH(dump->AddString("foo", "bar", "baz"), "");
}
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index f6cc8324662..d61528af8e5 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -26,14 +26,10 @@
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
-#include "base/trace_event/heap_profiler_serialization_state.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
-#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
-#include "base/trace_event/memory_peak_detector.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -53,14 +49,11 @@ namespace trace_event {
namespace {
-const char* const kTraceEventArgNames[] = {"dumps"};
-const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
-
MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;
-// Temporary (until peak detector and scheduler are moved outside of here)
+// Temporary (until scheduler is moved outside of here)
// trampoline function to match the |request_dump_function| passed to Initialize
-// to the callback expected by MemoryPeakDetector and MemoryDumpScheduler.
+// to the callback expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
@@ -69,83 +62,6 @@ void DoGlobalDumpWithoutCallback(
global_dump_fn.Run(dump_type, level_of_detail);
}
-// Proxy class which wraps a ConvertableToTraceFormat owned by the
-// |heap_profiler_serialization_state| into a proxy object that can be added to
-// the trace event log. This is to solve the problem that the
-// HeapProfilerSerializationState is refcounted but the tracing subsystem wants
-// a std::unique_ptr<ConvertableToTraceFormat>.
-template <typename T>
-struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
- using GetterFunctPtr = T* (HeapProfilerSerializationState::*)() const;
-
- SessionStateConvertableProxy(scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state,
- GetterFunctPtr getter_function)
- : heap_profiler_serialization_state(heap_profiler_serialization_state),
- getter_function(getter_function) {}
-
- void AppendAsTraceFormat(std::string* out) const override {
- return (heap_profiler_serialization_state.get()->*getter_function)()
- ->AppendAsTraceFormat(out);
- }
-
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- return (heap_profiler_serialization_state.get()->*getter_function)()
- ->EstimateTraceMemoryOverhead(overhead);
- }
-
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state;
- GetterFunctPtr const getter_function;
-};
-
-void NotifyHeapProfilingEnabledOnMDPThread(
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
- bool profiling_enabled) {
- mdpinfo->dump_provider->OnHeapProfilingEnabled(profiling_enabled);
-}
-
-inline bool ShouldEnableMDPAllocatorHooks(HeapProfilingMode mode) {
- return (mode == kHeapProfilingModePseudo) ||
- (mode == kHeapProfilingModeNative) ||
- (mode == kHeapProfilingModeBackground);
-}
-
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-inline bool IsHeapProfilingModeEnabled(HeapProfilingMode mode) {
- return mode != kHeapProfilingModeDisabled &&
- mode != kHeapProfilingModeInvalid;
-}
-
-void EnableFilteringForPseudoStackProfiling() {
- if (AllocationContextTracker::capture_mode() !=
- AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
- (TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
- return;
- }
- // Create trace config with heap profiling filter.
- std::string filter_string = JoinString(
- {"*", TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
- MemoryDumpManager::kTraceCategory},
- ",");
- TraceConfigCategoryFilter category_filter;
- category_filter.InitializeFromString(filter_string);
-
- TraceConfig::EventFilterConfig heap_profiler_filter_config(
- HeapProfilerEventFilter::kName);
- heap_profiler_filter_config.SetCategoryFilter(category_filter);
-
- TraceConfig::EventFilters filters;
- filters.push_back(heap_profiler_filter_config);
- TraceConfig filtering_trace_config;
- filtering_trace_config.SetEventFilters(filters);
-
- TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
- TraceLog::FILTERING_MODE);
-}
-#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-
} // namespace
// static
@@ -187,8 +103,7 @@ MemoryDumpManager::CreateInstanceForTesting() {
MemoryDumpManager::MemoryDumpManager()
: is_coordinator_(false),
tracing_process_id_(kInvalidTracingProcessId),
- dumper_registrations_ignored_for_testing_(false),
- heap_profiling_mode_(kHeapProfilingModeDisabled) {}
+ dumper_registrations_ignored_for_testing_(false) {}
MemoryDumpManager::~MemoryDumpManager() {
Thread* dump_thread = nullptr;
@@ -206,93 +121,6 @@ MemoryDumpManager::~MemoryDumpManager() {
g_memory_dump_manager_for_testing = nullptr;
}
-bool MemoryDumpManager::EnableHeapProfiling(HeapProfilingMode profiling_mode) {
- AutoLock lock(lock_);
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
- bool notify_mdps = true;
-
- if (heap_profiling_mode_ == kHeapProfilingModeInvalid)
- return false; // Disabled permanently.
-
- if (IsHeapProfilingModeEnabled(heap_profiling_mode_) ==
- IsHeapProfilingModeEnabled(profiling_mode)) {
- if (profiling_mode == kHeapProfilingModeDisabled)
- heap_profiling_mode_ = kHeapProfilingModeInvalid; // Disable permanently.
- return false;
- }
-
- switch (profiling_mode) {
- case kHeapProfilingModeTaskProfiler:
- if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
- base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
- notify_mdps = false;
- break;
-
- case kHeapProfilingModeBackground:
- AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::MIXED_STACK);
- break;
-
- case kHeapProfilingModePseudo:
- AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::PSEUDO_STACK);
- EnableFilteringForPseudoStackProfiling();
- break;
-
- case kHeapProfilingModeNative:
-#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
- {
- bool can_unwind = CFIBacktraceAndroid::GetInitializedInstance()
- ->can_unwind_stack_frames();
- DCHECK(can_unwind);
- }
-#endif
- // If we don't have frame pointers and unwind tables then native tracing
- // falls-back to using base::debug::StackTrace, which may be slow.
- AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::NATIVE_STACK);
- break;
-
- case kHeapProfilingModeDisabled:
- if (heap_profiling_mode_ == kHeapProfilingModeTaskProfiler) {
- LOG(ERROR) << "ThreadHeapUsageTracker cannot be disabled.";
- return false;
- }
- if (heap_profiling_mode_ == kHeapProfilingModePseudo)
- TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
- AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::DISABLED);
- heap_profiling_mode_ = kHeapProfilingModeInvalid; // Disable permanently.
- break;
-
- default:
- NOTREACHED() << "Incorrect heap profiling mode " << profiling_mode;
- return false;
- }
-
- if (heap_profiling_mode_ != kHeapProfilingModeInvalid)
- heap_profiling_mode_ = profiling_mode;
-
- // In case tracing was already enabled, setup the serialization state before
- // notifying mdps.
- InitializeHeapProfilerStateIfNeededLocked();
- if (notify_mdps) {
- bool enabled = IsHeapProfilingModeEnabled(heap_profiling_mode_);
- for (const auto& mdpinfo : dump_providers_)
- NotifyHeapProfilingEnabledLocked(mdpinfo, enabled);
- }
- return true;
-#else
- heap_profiling_mode_ = kHeapProfilingModeInvalid;
- return false;
-#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-}
-
-HeapProfilingMode MemoryDumpManager::GetHeapProfilingMode() {
- AutoLock lock(lock_);
- return heap_profiling_mode_;
-}
-
void MemoryDumpManager::Initialize(
RequestGlobalDumpFunction request_dump_function,
bool is_coordinator) {
@@ -306,10 +134,7 @@ void MemoryDumpManager::Initialize(
// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
- base::trace_event::MemoryDumpProvider::Options options;
- options.supports_heap_profiling = true;
- RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr,
- options);
+ RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif
#if defined(OS_ANDROID)
@@ -368,11 +193,6 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
whitelisted_for_background_mode);
- if (options.is_fast_polling_supported) {
- DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
- "polling must NOT be thread bound.";
- }
-
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -380,12 +200,6 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
// path for RenderThreadImpl::Init().
if (already_registered)
return;
-
- if (options.is_fast_polling_supported)
- MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
-
- if (ShouldEnableMDPAllocatorHooks(heap_profiling_mode_))
- NotifyHeapProfilingEnabledLocked(mdpinfo, true);
}
}
@@ -421,7 +235,6 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - In ContinueAsyncProcessDump() when MDPInfo is removed from
// |pending_dump_providers|.
- // - When the provider is removed from other clients (MemoryPeakDetector).
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
} else {
@@ -440,11 +253,6 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
- if ((*mdp_iter)->options.is_fast_polling_supported) {
- DCHECK(take_mdp_ownership_and_delete_async);
- MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
- }
-
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -454,16 +262,6 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
-void MemoryDumpManager::GetDumpProvidersForPolling(
- std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) {
- DCHECK(providers->empty());
- AutoLock lock(lock_);
- for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) {
- if (mdp->options.is_fast_polling_supported)
- providers->push_back(mdp);
- }
-}
-
bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
MemoryDumpProvider* provider) {
AutoLock lock(lock_);
@@ -512,25 +310,8 @@ void MemoryDumpManager::CreateProcessDump(
{
AutoLock lock(lock_);
- // MDM could have been disabled by this point destroying
- // |heap_profiler_serialization_state|. If heap profiling is enabled we
- // require session state so if heap profiling is on and session state is
- // absent we fail the dump immediately. If heap profiler is enabled during
- // the dump, then the dump succeeds since the dump was requested before, and
- // the future process dumps will contain heap dumps.
- if (args.dump_type != MemoryDumpType::SUMMARY_ONLY &&
- ShouldEnableMDPAllocatorHooks(heap_profiling_mode_) &&
- !heap_profiler_serialization_state_) {
- callback.Run(false /* success */, args.dump_guid, nullptr);
- return;
- }
-
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
- args, dump_providers_, heap_profiler_serialization_state_, callback,
- GetOrCreateBgTaskRunnerLocked()));
-
- // If enabled, holds back the peak detector resetting its estimation window.
- MemoryPeakDetector::GetInstance()->Throttle();
+ args, dump_providers_, callback, GetOrCreateBgTaskRunnerLocked()));
}
// Start the process dump. This involves task runner hops as specified by the
@@ -695,33 +476,6 @@ void MemoryDumpManager::FinishAsyncProcessDump(
TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");
- // In the general case (allocators and edges) the serialization into the trace
- // buffer is handled by the memory-infra service (see tracing_observer.cc).
- // This special case below deals only with serialization of the heap profiler
- // and is temporary given the upcoming work on the out-of-process heap
- // profiler.
- const auto& args = pmd_async_state->req_args;
- if (!pmd_async_state->process_memory_dump->heap_dumps().empty()) {
- std::unique_ptr<TracedValue> traced_value = std::make_unique<TracedValue>();
- pmd_async_state->process_memory_dump->SerializeHeapProfilerDumpsInto(
- traced_value.get());
-
- traced_value->SetString("level_of_detail",
- base::trace_event::MemoryDumpLevelOfDetailToString(
- args.level_of_detail));
- std::unique_ptr<base::trace_event::ConvertableToTraceFormat> event_value(
- std::move(traced_value));
- TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
- TRACE_EVENT_PHASE_MEMORY_DUMP,
- base::trace_event::TraceLog::GetCategoryGroupEnabled(
- base::trace_event::MemoryDumpManager::kTraceCategory),
- base::trace_event::MemoryDumpTypeToString(args.dump_type),
- trace_event_internal::kGlobalScope, args.dump_guid,
- base::kNullProcessId, 1 /* num_args */, kTraceEventArgNames,
- kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
- TRACE_EVENT_FLAG_HAS_ID);
- }
-
if (!pmd_async_state->callback.is_null()) {
pmd_async_state->callback.Run(
true /* success */, dump_guid,
@@ -736,17 +490,11 @@ void MemoryDumpManager::FinishAsyncProcessDump(
void MemoryDumpManager::SetupForTracing(
const TraceConfig::MemoryDumpConfig& memory_dump_config) {
AutoLock lock(lock_);
- heap_profiler_serialization_state_ = new HeapProfilerSerializationState();
- heap_profiler_serialization_state_
- ->set_heap_profiler_breakdown_threshold_bytes(
- memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
- InitializeHeapProfilerStateIfNeededLocked();
// At this point we must have the ability to request global dumps.
DCHECK(can_request_global_dumps());
MemoryDumpScheduler::Config periodic_config;
- bool peak_detector_configured = false;
for (const auto& trigger : memory_dump_config.triggers) {
if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
if (periodic_config.triggers.empty()) {
@@ -756,34 +504,6 @@ void MemoryDumpManager::SetupForTracing(
}
periodic_config.triggers.push_back(
{trigger.level_of_detail, trigger.min_time_between_dumps_ms});
- } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
- // At most one peak trigger is allowed.
- CHECK(!peak_detector_configured);
- peak_detector_configured = true;
- MemoryPeakDetector::GetInstance()->Setup(
- BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
- Unretained(this)),
- GetOrCreateBgTaskRunnerLocked(),
- BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
- MemoryDumpType::PEAK_MEMORY_USAGE,
- trigger.level_of_detail));
-
- MemoryPeakDetector::Config peak_config;
- peak_config.polling_interval_ms = 10;
- peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
- peak_config.enable_verbose_poll_tracing =
- trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
- MemoryPeakDetector::GetInstance()->Start(peak_config);
-
- // When peak detection is enabled, trigger a dump straight away as it
- // gives a good reference point for analyzing the trace.
- if (is_coordinator_) {
- GetOrCreateBgTaskRunnerLocked()->PostTask(
- FROM_HERE,
- BindOnce(&DoGlobalDumpWithoutCallback, request_dump_function_,
- MemoryDumpType::PEAK_MEMORY_USAGE,
- trigger.level_of_detail));
- }
}
}
@@ -801,73 +521,21 @@ void MemoryDumpManager::TeardownForTracing() {
AutoLock lock(lock_);
MemoryDumpScheduler::GetInstance()->Stop();
- MemoryPeakDetector::GetInstance()->TearDown();
- heap_profiler_serialization_state_ = nullptr;
-}
-
-void MemoryDumpManager::InitializeHeapProfilerStateIfNeededLocked() {
- lock_.AssertAcquired();
- if (!ShouldEnableMDPAllocatorHooks(heap_profiling_mode_) ||
- !heap_profiler_serialization_state_ ||
- heap_profiler_serialization_state_->is_initialized()) {
- return;
- }
- // If heap profiling is enabled, the stack frame deduplicator and type name
- // deduplicator will be in use. Add a metadata events to write the frames
- // and type IDs.
- heap_profiler_serialization_state_->SetStackFrameDeduplicator(
- WrapUnique(new StackFrameDeduplicator));
- heap_profiler_serialization_state_->SetTypeNameDeduplicator(
- WrapUnique(new TypeNameDeduplicator));
-
- TRACE_EVENT_API_ADD_METADATA_EVENT(
- TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
- "stackFrames",
- std::make_unique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
- heap_profiler_serialization_state_,
- &HeapProfilerSerializationState::stack_frame_deduplicator));
-
- TRACE_EVENT_API_ADD_METADATA_EVENT(
- TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", "typeNames",
- std::make_unique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
- heap_profiler_serialization_state_,
- &HeapProfilerSerializationState::type_name_deduplicator));
-}
-
-void MemoryDumpManager::NotifyHeapProfilingEnabledLocked(
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
- bool enabled) {
- lock_.AssertAcquired();
- if (!mdpinfo->options.supports_heap_profiling)
- return;
-
- const auto& task_runner = mdpinfo->task_runner
- ? mdpinfo->task_runner
- : GetOrCreateBgTaskRunnerLocked();
- // TODO(ssid): Post tasks only for MDPs that support heap profiling.
- task_runner->PostTask(
- FROM_HERE,
- BindOnce(&NotifyHeapProfilingEnabledOnMDPThread, mdpinfo, enabled));
}
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state_in,
ProcessMemoryDumpCallback callback,
scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
: req_args(req_args),
- heap_profiler_serialization_state(
- std::move(heap_profiler_serialization_state_in)),
callback(callback),
callback_task_runner(ThreadTaskRunnerHandle::Get()),
dump_thread_task_runner(std::move(dump_thread_task_runner)) {
pending_dump_providers.reserve(dump_providers.size());
pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
MemoryDumpArgs args = {req_args.level_of_detail, req_args.dump_guid};
- process_memory_dump = std::make_unique<ProcessMemoryDump>(
- heap_profiler_serialization_state, args);
+ process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}
MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
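Editor's note: with the peak detector and heap-profiler state removed above, SetupForTracing() now only consumes PERIODIC_INTERVAL triggers and configures the MemoryDumpScheduler. The following is a minimal sketch of driving that path from embedder code; the Trigger aggregate-initialization order and the exact field set are assumptions inferred from the fields read in the loop above (trigger_type, level_of_detail, min_time_between_dumps_ms), not something this patch guarantees.

// Hypothetical sketch: starting and stopping periodic memory dumps against
// the simplified MemoryDumpManager. Assumes Initialize() was already called
// with a valid request-dump function (SetupForTracing DCHECKs on that).
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_config.h"

namespace {

void StartPeriodicMemoryDumps() {
  using base::trace_event::MemoryDumpLevelOfDetail;
  using base::trace_event::MemoryDumpManager;
  using base::trace_event::MemoryDumpType;
  using base::trace_event::TraceConfig;

  TraceConfig::MemoryDumpConfig config;
  // One light dump every 250 ms and one detailed dump every 2 s. The
  // initializer order {min_time_between_dumps_ms, level_of_detail,
  // trigger_type} is an assumption about TraceConfig::MemoryDumpConfig::
  // Trigger, hedged here rather than taken from this diff.
  config.triggers.push_back(
      {250, MemoryDumpLevelOfDetail::LIGHT, MemoryDumpType::PERIODIC_INTERVAL});
  config.triggers.push_back({2000, MemoryDumpLevelOfDetail::DETAILED,
                             MemoryDumpType::PERIODIC_INTERVAL});

  // After this patch, only the scheduler is configured here; there is no
  // peak detector or heap-profiler serialization state left to set up.
  MemoryDumpManager::GetInstance()->SetupForTracing(config);
}

void StopPeriodicMemoryDumps() {
  base::trace_event::MemoryDumpManager::GetInstance()->TeardownForTracing();
}

}  // namespace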
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 072a7d6dee5..6033cfbb511 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -33,16 +33,6 @@ class Thread;
namespace trace_event {
class MemoryDumpProvider;
-class HeapProfilerSerializationState;
-
-enum HeapProfilingMode {
- kHeapProfilingModeDisabled,
- kHeapProfilingModeTaskProfiler, // Per task counters for allocs and frees.
- kHeapProfilingModeBackground, // Pseudo stacks without default filtering.
- kHeapProfilingModePseudo, // Pseudo stacks with default filtering categories.
- kHeapProfilingModeNative, // Native stacks
- kHeapProfilingModeInvalid // Disabled permanently or unsupported.
-};
// This is the interface exposed to the rest of the codebase to deal with
// memory tracing. The main entry point for clients is represented by
@@ -72,8 +62,7 @@ class BASE_EXPORT MemoryDumpManager {
// request_dump_function: Function to invoke a global dump. Global dump
// involves embedder-specific behaviors like multiprocess handshaking.
// TODO(primiano): this is only required to trigger global dumps from
- // the scheduler and the peak detector. Should be removed once they are
- // both moved out of base.
+ // the scheduler. Should be removed once it is moved out of base.
void Initialize(RequestGlobalDumpFunction request_dump_function,
bool is_coordinator);
@@ -109,15 +98,13 @@ class BASE_EXPORT MemoryDumpManager {
// This method takes ownership of the dump provider and guarantees that:
// - The |mdp| will be deleted at some point in the near future.
// - Its deletion will not happen concurrently with the OnMemoryDump() call.
- // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
- // after this method returns.
+ // Note that OnMemoryDump() calls can still happen after this method returns.
void UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp);
// Prepare MemoryDumpManager for CreateProcessDump() calls for tracing-related
// modes (i.e. |level_of_detail| != SUMMARY_ONLY).
- // Also initializes the peak detector, scheduler and heap profiler with the
- // given config.
+ // Also initializes the scheduler with the given config.
void SetupForTracing(const TraceConfig::MemoryDumpConfig&);
// Tear-down tracing related state.
@@ -131,20 +118,9 @@ class BASE_EXPORT MemoryDumpManager {
void CreateProcessDump(const MemoryDumpRequestArgs& args,
const ProcessMemoryDumpCallback& callback);
- // Enable heap profiling with specified |profiling_mode|.
- // Use kHeapProfilingModeDisabled to disable, but it can't be re-enabled then.
- // Returns true if mode has been *changed* to the desired |profiling_mode|.
- bool EnableHeapProfiling(HeapProfilingMode profiling_mode);
- HeapProfilingMode GetHeapProfilingMode();
-
// Lets tests see if a dump provider is registered.
bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
- const scoped_refptr<HeapProfilerSerializationState>&
- heap_profiler_serialization_state_for_testing() const {
- return heap_profiler_serialization_state_;
- }
-
// Returns a unique id for identifying the processes. The id can be
// retrieved by child processes only when tracing is enabled. This is
// intended to express cross-process sharing of memory dumps on the
@@ -182,8 +158,6 @@ class BASE_EXPORT MemoryDumpManager {
ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state,
ProcessMemoryDumpCallback callback,
scoped_refptr<SequencedTaskRunner> dump_thread_task_runner);
~ProcessMemoryDumpAsyncState();
@@ -199,12 +173,6 @@ class BASE_EXPORT MemoryDumpManager {
// and becomes empty at the end, when all dump providers have been invoked.
std::vector<scoped_refptr<MemoryDumpProviderInfo>> pending_dump_providers;
- // The HeapProfilerSerializationState object, which is shared by all
- // the ProcessMemoryDump and MemoryAllocatorDump instances through all the
- // tracing session lifetime.
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state;
-
// Callback passed to the initial call to CreateProcessDump().
ProcessMemoryDumpCallback callback;
@@ -261,21 +229,6 @@ class BASE_EXPORT MemoryDumpManager {
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
- // Fills the passed vector with the subset of dump providers which were
- // registered with is_fast_polling_supported == true.
- void GetDumpProvidersForPolling(
- std::vector<scoped_refptr<MemoryDumpProviderInfo>>*);
-
- // Initialize |heap_profiler_serialization_state_| when tracing and heap
- // profiler are enabled.
- void InitializeHeapProfilerStateIfNeededLocked();
-
- // Sends OnHeapProfilingEnabled() notifcation to mdp ensuring OnMemoryDump()
- // is not called at the same time.
- void NotifyHeapProfilingEnabledLocked(
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
- bool enabled);
-
bool can_request_global_dumps() const {
return !request_dump_function_.is_null();
}
@@ -284,10 +237,6 @@ class BASE_EXPORT MemoryDumpManager {
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
- // Shared among all the PMDs to keep state scoped to the tracing session.
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state_;
-
// Function provided by the embedder to handle global dump requests.
RequestGlobalDumpFunction request_dump_function_;
@@ -309,8 +258,6 @@ class BASE_EXPORT MemoryDumpManager {
// When true, calling |RegisterMemoryDumpProvider| is a no-op.
bool dumper_registrations_ignored_for_testing_;
- HeapProfilingMode heap_profiling_mode_;
-
DISALLOW_COPY_AND_ASSIGN(MemoryDumpManager);
};
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index e92045edcb8..706df2dafe6 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -108,9 +108,6 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
MOCK_METHOD0(Destructor, void());
MOCK_METHOD2(OnMemoryDump,
bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
- MOCK_METHOD1(OnHeapProfilingEnabled, void(bool enabled));
- MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
- MOCK_METHOD0(SuspendFastMemoryPolling, void());
MockMemoryDumpProvider() : enable_mock_destructor(false) {
ON_CALL(*this, OnMemoryDump(_, _))
@@ -118,10 +115,6 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
Invoke([](const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
return true;
}));
-
- ON_CALL(*this, PollFastMemoryTotal(_))
- .WillByDefault(
- Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
}
~MockMemoryDumpProvider() override {
if (enable_mock_destructor)
@@ -329,44 +322,6 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
mdm_->UnregisterDumpProvider(&mdp);
}
-// Checks that the HeapProfilerSerializationState object is actually
-// shared over time.
-TEST_F(MemoryDumpManagerTest, HeapProfilerSerializationState) {
- MockMemoryDumpProvider mdp1;
- MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1, nullptr);
- RegisterDumpProvider(&mdp2, nullptr);
-
- EnableForTracing();
- const HeapProfilerSerializationState* heap_profiler_serialization_state =
- mdm_->heap_profiler_serialization_state_for_testing().get();
- EXPECT_CALL(mdp1, OnMemoryDump(_, _))
- .Times(2)
- .WillRepeatedly(
- Invoke([heap_profiler_serialization_state](
- const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
- EXPECT_EQ(heap_profiler_serialization_state,
- pmd->heap_profiler_serialization_state().get());
- return true;
- }));
- EXPECT_CALL(mdp2, OnMemoryDump(_, _))
- .Times(2)
- .WillRepeatedly(
- Invoke([heap_profiler_serialization_state](
- const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
- EXPECT_EQ(heap_profiler_serialization_state,
- pmd->heap_profiler_serialization_state().get());
- return true;
- }));
-
- for (int i = 0; i < 2; ++i) {
- EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
- MemoryDumpLevelOfDetail::DETAILED));
- }
-
- DisableTracing();
-}
-
// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
MockMemoryDumpProvider mdp1;
@@ -826,141 +781,6 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
DisableTracing();
}
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-TEST_F(MemoryDumpManagerTest, EnableHeapProfilingPseudoStack) {
- MockMemoryDumpProvider mdp1;
- MockMemoryDumpProvider mdp2;
- MockMemoryDumpProvider mdp3;
- MemoryDumpProvider::Options supported_options;
- supported_options.supports_heap_profiling = true;
- RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
- {
- testing::InSequence sequence;
- EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
- EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
- }
- {
- testing::InSequence sequence;
- EXPECT_CALL(mdp2, OnHeapProfilingEnabled(true)).Times(1);
- EXPECT_CALL(mdp2, OnHeapProfilingEnabled(false)).Times(1);
- }
- RegisterDumpProvider(&mdp3, ThreadTaskRunnerHandle::Get());
- EXPECT_CALL(mdp3, OnHeapProfilingEnabled(_)).Times(0);
-
- EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
- RunLoop().RunUntilIdle();
- ASSERT_EQ(AllocationContextTracker::CaptureMode::PSEUDO_STACK,
- AllocationContextTracker::capture_mode());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
- EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
- RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
-
- TraceConfig::MemoryDumpConfig config;
- config.heap_profiler_options.breakdown_threshold_bytes = 100;
- mdm_->SetupForTracing(config);
- EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
- mdm_->heap_profiler_serialization_state_for_testing()
- ->heap_profiler_breakdown_threshold_bytes());
- EXPECT_TRUE(
- mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
- mdm_->TeardownForTracing();
- EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
-
- // Disable will permanently disable heap profiling.
- EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
- RunLoop().RunUntilIdle();
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
- EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
- EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
- ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
- AllocationContextTracker::capture_mode());
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
- ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
- AllocationContextTracker::capture_mode());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingBackground) {
- MockMemoryDumpProvider mdp1;
- MemoryDumpProvider::Options supported_options;
- supported_options.supports_heap_profiling = true;
- RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
- testing::InSequence sequence;
- EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
- EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
-
- // Enable tracing before heap profiling.
- TraceConfig::MemoryDumpConfig config;
- config.heap_profiler_options.breakdown_threshold_bytes = 100;
- mdm_->SetupForTracing(config);
- EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
- mdm_->heap_profiler_serialization_state_for_testing()
- ->heap_profiler_breakdown_threshold_bytes());
- EXPECT_FALSE(
- mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
-
- EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
- RunLoop().RunUntilIdle();
- ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
- AllocationContextTracker::capture_mode());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
- EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
- EXPECT_TRUE(
- mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
- // Do nothing when already enabled.
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
- ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
- AllocationContextTracker::capture_mode());
- EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
- // Disable will permanently disable heap profiling.
- EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
- RunLoop().RunUntilIdle();
- ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
- AllocationContextTracker::capture_mode());
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
- ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
- AllocationContextTracker::capture_mode());
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
- RunLoop().RunUntilIdle();
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
- mdm_->TeardownForTracing();
- EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingTask) {
- MockMemoryDumpProvider mdp1;
- MockMemoryDumpProvider mdp2;
- MemoryDumpProvider::Options supported_options;
- supported_options.supports_heap_profiling = true;
- RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
- EXPECT_CALL(mdp1, OnHeapProfilingEnabled(_)).Times(0);
- EXPECT_CALL(mdp2, OnHeapProfilingEnabled(_)).Times(0);
-
- ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
- EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeTaskProfiler));
- RunLoop().RunUntilIdle();
- ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
- AllocationContextTracker::capture_mode());
- RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeTaskProfiler);
- ASSERT_TRUE(debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
- TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
- ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingDisableDisabled) {
- ASSERT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeDisabled);
- EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
- EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-}
-#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-
// Mock MDP class that tests if the number of OnMemoryDump() calls are expected.
// It is implemented without gmocks since EXPECT_CALL implementation is slow
// when there are 1000s of instances, as required in
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index b458bfb1ea5..f55e2cf4719 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -20,24 +20,12 @@ class BASE_EXPORT MemoryDumpProvider {
public:
// Optional arguments for MemoryDumpManager::RegisterDumpProvider().
struct Options {
- Options()
- : dumps_on_single_thread_task_runner(false),
- is_fast_polling_supported(false),
- supports_heap_profiling(false) {}
+ Options() : dumps_on_single_thread_task_runner(false) {}
// |dumps_on_single_thread_task_runner| is true if the dump provider runs on
// a SingleThreadTaskRunner, which is usually the case. It is faster to run
// all providers that run on the same thread together without thread hops.
bool dumps_on_single_thread_task_runner;
-
- // Set to true if the dump provider implementation supports high frequency
- // polling. Only providers running without task runner affinity are
- // supported.
- bool is_fast_polling_supported;
-
- // Set to true when the dump provider supports heap profiling. MDM sends
- // OnHeapProfiling() notifications only if this is set to true.
- bool supports_heap_profiling;
};
virtual ~MemoryDumpProvider() = default;
@@ -52,23 +40,6 @@ class BASE_EXPORT MemoryDumpProvider {
virtual bool OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) = 0;
- // Called by the MemoryDumpManager when an allocator should start or stop
- // collecting extensive allocation data, if supported. Called only when
- // |supports_heap_profiling| is set to true.
- virtual void OnHeapProfilingEnabled(bool enabled) {}
-
- // Quickly record the total memory usage in |memory_total|. This method will
- // be called only when the dump provider registration has
- // |is_fast_polling_supported| set to true. This method is used for polling at
- // high frequency for detecting peaks. See comment on
- // |is_fast_polling_supported| option if you need to override this method.
- virtual void PollFastMemoryTotal(uint64_t* memory_total) {}
-
- // Indicates that fast memory polling is not going to be used in the near
- // future and the MDP can tear down any resource kept around for fast memory
- // polling.
- virtual void SuspendFastMemoryPolling() {}
-
protected:
MemoryDumpProvider() = default;
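Editor's note: after this change a dump provider only overrides OnMemoryDump(); the polling and heap-profiling hooks are gone, and Options carries a single flag. Below is a minimal sketch of a provider against the trimmed interface. The dump name "my_cache", the byte counter, and the commented registration call are illustrative; CreateAllocatorDump()/AddScalar() and MemoryDumpManager::GetInstance()->RegisterDumpProvider() are the usual base::trace_event helpers, not part of this patch.

// Minimal sketch of a MemoryDumpProvider against the reduced interface.
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

class MyCacheDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    // Report one allocator dump with the current cache footprint.
    auto* dump = pmd->CreateAllocatorDump("my_cache");
    dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                    base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                    cached_bytes_);
    return true;
  }

 private:
  uint64_t cached_bytes_ = 0;
};

// Registration uses the default Options; the only remaining flag is
// dumps_on_single_thread_task_runner. For example (from a thread with a
// task runner):
// base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//     provider, "MyCache", base::ThreadTaskRunnerHandle::Get());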
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index 3cb9cabae52..8be3c324047 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -16,8 +16,6 @@ const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
return "explicitly_triggered";
- case MemoryDumpType::PEAK_MEMORY_USAGE:
- return "peak_memory_usage";
case MemoryDumpType::SUMMARY_ONLY:
return "summary_only";
}
@@ -30,8 +28,6 @@ MemoryDumpType StringToMemoryDumpType(const std::string& str) {
return MemoryDumpType::PERIODIC_INTERVAL;
if (str == "explicitly_triggered")
return MemoryDumpType::EXPLICITLY_TRIGGERED;
- if (str == "peak_memory_usage")
- return MemoryDumpType::PEAK_MEMORY_USAGE;
if (str == "summary_only")
return MemoryDumpType::SUMMARY_ONLY;
NOTREACHED();
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index 41bc99bc724..f854a4b3732 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -30,7 +30,6 @@ class ProcessMemoryDump;
enum class MemoryDumpType {
PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
- PEAK_MEMORY_USAGE, // Dumping memory at detected peak total memory usage.
SUMMARY_ONLY, // Calculate just the summary & don't add to the trace.
LAST = SUMMARY_ONLY
};
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index aa80a5671f7..40f5ac8ccc6 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -115,6 +115,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"mojo",
"mojo/data_pipe_consumer",
"mojo/data_pipe_producer",
+ "mojo/invitation",
"mojo/messages",
"mojo/message_pipe",
"mojo/platform_handle",
@@ -256,6 +257,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"v8/isolate_0x?/heap_spaces/large_object_space",
"v8/isolate_0x?/heap_spaces/map_space",
"v8/isolate_0x?/heap_spaces/new_space",
+ "v8/isolate_0x?/heap_spaces/new_large_object_space",
"v8/isolate_0x?/heap_spaces/old_space",
"v8/isolate_0x?/heap_spaces/read_only_space",
"v8/isolate_0x?/malloc",
@@ -291,6 +293,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"sync/0x?/model_type/MANAGED_USER_SETTING",
"sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
"sync/0x?/model_type/MANAGED_USER_WHITELIST",
+ "sync/0x?/model_type/MOUNTAIN_SHARE",
"sync/0x?/model_type/NIGORI",
"sync/0x?/model_type/PASSWORD",
"sync/0x?/model_type/PREFERENCE",
@@ -303,6 +306,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
"sync/0x?/model_type/THEME",
"sync/0x?/model_type/TYPED_URL",
+ "sync/0x?/model_type/USER_CONSENT",
"sync/0x?/model_type/USER_EVENT",
"sync/0x?/model_type/WALLET_METADATA",
"sync/0x?/model_type/WIFI_CREDENTIAL",
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist_unittest.cc b/chromium/base/trace_event/memory_infra_background_whitelist_unittest.cc
new file mode 100644
index 00000000000..3037eb1da72
--- /dev/null
+++ b/chromium/base/trace_event/memory_infra_background_whitelist_unittest.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace trace_event {
+
+TEST(MemoryInfraBackgroundWhitelist, Whitelist) {
+ // Global dumps that are of hex digits are all whitelisted for background use.
+ EXPECT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("global/01234ABCDEF"));
+ EXPECT_TRUE(
+ IsMemoryAllocatorDumpNameWhitelisted("shared_memory/01234ABCDEF"));
+
+ // Global dumps that contain non-hex digits are not whitelisted.
+ EXPECT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("global/GHIJK"));
+ EXPECT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("shared_memory/GHIJK"));
+
+ // Test a couple that contain pointer values.
+ EXPECT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("net/url_request_context"));
+ EXPECT_TRUE(IsMemoryAllocatorDumpNameWhitelisted(
+ "net/url_request_context/app_request/0x123/cookie_monster"));
+ EXPECT_TRUE(
+ IsMemoryAllocatorDumpNameWhitelisted("net/http_network_session_0x123"));
+ EXPECT_FALSE(
+ IsMemoryAllocatorDumpNameWhitelisted("net/http_network_session/0x123"));
+ EXPECT_TRUE(IsMemoryAllocatorDumpNameWhitelisted(
+ "net/http_network_session_0x123/quic_stream_factory"));
+}
+
+} // namespace trace_event
+
+} // namespace base
diff --git a/chromium/base/trace_event/memory_peak_detector.cc b/chromium/base/trace_event/memory_peak_detector.cc
deleted file mode 100644
index 541959406ca..00000000000
--- a/chromium/base/trace_event/memory_peak_detector.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/sys_info.h"
-#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/time/time.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
- static MemoryPeakDetector* instance = new MemoryPeakDetector();
- return instance;
-}
-
-MemoryPeakDetector::MemoryPeakDetector()
- : generation_(0),
- state_(NOT_INITIALIZED),
- poll_tasks_count_for_testing_(0) {}
-
-MemoryPeakDetector::~MemoryPeakDetector() {
- // This is hit only in tests, in which case the test is expected to TearDown()
- // cleanly and not leave the peak detector running.
- DCHECK_EQ(NOT_INITIALIZED, state_);
-}
-
-void MemoryPeakDetector::Setup(
- const GetDumpProvidersFunction& get_dump_providers_function,
- const scoped_refptr<SequencedTaskRunner>& task_runner,
- const OnPeakDetectedCallback& on_peak_detected_callback) {
- DCHECK(!get_dump_providers_function.is_null());
- DCHECK(task_runner);
- DCHECK(!on_peak_detected_callback.is_null());
- DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
- DCHECK(dump_providers_.empty());
- get_dump_providers_function_ = get_dump_providers_function;
- task_runner_ = task_runner;
- on_peak_detected_callback_ = on_peak_detected_callback;
- state_ = DISABLED;
- config_ = {};
- ResetPollHistory();
-
- static_threshold_bytes_ = 0;
-#if !defined(OS_NACL)
- // Set threshold to 1% of total system memory.
- static_threshold_bytes_ =
- static_cast<uint64_t>(SysInfo::AmountOfPhysicalMemory()) / 100;
-#endif
- // Fallback, mostly for test environments where AmountOfPhysicalMemory() is
- // broken.
- static_threshold_bytes_ =
- std::max(static_threshold_bytes_, static_cast<uint64_t>(5 * 1024 * 1024));
-}
-
-void MemoryPeakDetector::TearDown() {
- if (task_runner_) {
- task_runner_->PostTask(
- FROM_HERE,
- BindOnce(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
- }
- task_runner_ = nullptr;
-}
-
-void MemoryPeakDetector::Start(MemoryPeakDetector::Config config) {
- if (!config.polling_interval_ms) {
- NOTREACHED();
- return;
- }
- task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryPeakDetector::StartInternal,
- Unretained(this), config));
-}
-
-void MemoryPeakDetector::Stop() {
- task_runner_->PostTask(
- FROM_HERE, BindOnce(&MemoryPeakDetector::StopInternal, Unretained(this)));
-}
-
-void MemoryPeakDetector::Throttle() {
- if (!task_runner_)
- return; // Can be called before Setup().
- task_runner_->PostTask(
- FROM_HERE, BindOnce(&MemoryPeakDetector::ResetPollHistory,
- Unretained(this), true /* keep_last_sample */));
-}
-
-void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
- if (!task_runner_)
- return; // Can be called before Setup().
- task_runner_->PostTask(
- FROM_HERE,
- BindOnce(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
- Unretained(this)));
-}
-
-void MemoryPeakDetector::StartInternal(MemoryPeakDetector::Config config) {
- DCHECK_EQ(DISABLED, state_);
- state_ = ENABLED;
- config_ = config;
- ResetPollHistory();
-
- // If there are any dump providers available,
- // NotifyMemoryDumpProvidersChanged will fetch them and start the polling.
- // Otherwise this will remain in the ENABLED state and the actual polling
- // will start on the next call to
- // ReloadDumpProvidersAndStartPollingIfNeeded().
- // Depending on the sandbox model, it is possible that no polling-capable
- // dump providers will be ever available.
- ReloadDumpProvidersAndStartPollingIfNeeded();
-}
-
-void MemoryPeakDetector::StopInternal() {
- DCHECK_NE(NOT_INITIALIZED, state_);
- state_ = DISABLED;
- ++generation_;
- for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info : dump_providers_)
- mdp_info->dump_provider->SuspendFastMemoryPolling();
- dump_providers_.clear();
-}
-
-void MemoryPeakDetector::TearDownInternal() {
- StopInternal();
- get_dump_providers_function_.Reset();
- on_peak_detected_callback_.Reset();
- state_ = NOT_INITIALIZED;
-}
-
-void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
- if (state_ == DISABLED || state_ == NOT_INITIALIZED)
- return; // Start() will re-fetch the MDP list later.
-
- DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
- (state_ == ENABLED && dump_providers_.empty()));
-
- dump_providers_.clear();
-
- // This is really MemoryDumpManager::GetDumpProvidersForPolling, % testing.
- get_dump_providers_function_.Run(&dump_providers_);
-
- if (state_ == ENABLED && !dump_providers_.empty()) {
- // It's now time to start polling for realz.
- state_ = RUNNING;
- task_runner_->PostTask(
- FROM_HERE, BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak,
- Unretained(this), ++generation_));
- } else if (state_ == RUNNING && dump_providers_.empty()) {
- // Will cause the next PollMemoryAndDetectPeak() task to early return.
- state_ = ENABLED;
- ++generation_;
- }
-}
-
-void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
- if (state_ != RUNNING || generation_ != expected_generation)
- return;
-
- // We should never end up in a situation where state_ == RUNNING but all dump
- // providers are gone.
- DCHECK(!dump_providers_.empty());
-
- poll_tasks_count_for_testing_++;
- uint64_t polled_mem_bytes = 0;
- for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
- dump_providers_) {
- DCHECK(mdp_info->options.is_fast_polling_supported);
- uint64_t value = 0;
- mdp_info->dump_provider->PollFastMemoryTotal(&value);
- polled_mem_bytes += value;
- }
- if (config_.enable_verbose_poll_tracing) {
- TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
- polled_mem_bytes / 1024 / 1024);
- }
-
- // Peak detection logic. Design doc: https://goo.gl/0kOU4A .
- bool is_peak = false;
- if (skip_polls_ > 0) {
- skip_polls_--;
- } else if (last_dump_memory_total_ == 0) {
- last_dump_memory_total_ = polled_mem_bytes;
- } else if (polled_mem_bytes > 0) {
- int64_t diff_from_last_dump = polled_mem_bytes - last_dump_memory_total_;
-
- DCHECK_GT(static_threshold_bytes_, 0u);
- is_peak =
- diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
-
- if (!is_peak)
- is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
- }
-
- DCHECK_GT(config_.polling_interval_ms, 0u);
- SequencedTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE,
- BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
- expected_generation),
- TimeDelta::FromMilliseconds(config_.polling_interval_ms));
-
- if (!is_peak)
- return;
- TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
- "Peak memory detected", TRACE_EVENT_SCOPE_PROCESS,
- "PolledMemoryMB", polled_mem_bytes / 1024 / 1024);
- ResetPollHistory(true /* keep_last_sample */);
- last_dump_memory_total_ = polled_mem_bytes;
- on_peak_detected_callback_.Run();
-}
-
-bool MemoryPeakDetector::DetectPeakUsingSlidingWindowStddev(
- uint64_t polled_mem_bytes) {
- DCHECK(polled_mem_bytes);
- samples_bytes_[samples_index_] = polled_mem_bytes;
- samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
- float mean = 0;
- for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
- if (samples_bytes_[i] == 0)
- return false; // Not enough samples to detect peaks.
- mean += samples_bytes_[i];
- }
- mean /= kSlidingWindowNumSamples;
- float variance = 0;
- for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
- const float deviation = samples_bytes_[i] - mean;
- variance += deviation * deviation;
- }
- variance /= kSlidingWindowNumSamples;
-
- // If stddev is less than 0.2% then we consider that the process is inactive.
- if (variance < (mean / 500) * (mean / 500))
- return false;
-
- // (mean + 3.69 * stddev) corresponds to a value that is higher than current
- // sample with 99.99% probability.
- const float cur_sample_deviation = polled_mem_bytes - mean;
- return cur_sample_deviation * cur_sample_deviation > (3.69 * 3.69 * variance);
-}
-
-void MemoryPeakDetector::ResetPollHistory(bool keep_last_sample) {
- // TODO(primiano,ssid): this logic should probably be revisited. In the case
- // of Android, the browser process sees the total of all processes memory in
- // the same peak detector instance. Perhaps the best thing to do here is to
- // keep the window of samples around and just bump the skip_polls_.
- last_dump_memory_total_ = 0;
- if (keep_last_sample) {
- const uint32_t prev_index =
- samples_index_ > 0 ? samples_index_ - 1 : kSlidingWindowNumSamples - 1;
- last_dump_memory_total_ = samples_bytes_[prev_index];
- }
- memset(samples_bytes_, 0, sizeof(samples_bytes_));
- samples_index_ = 0;
- skip_polls_ = 0;
- if (config_.polling_interval_ms > 0) {
- skip_polls_ =
- (config_.min_time_between_peaks_ms + config_.polling_interval_ms - 1) /
- config_.polling_interval_ms;
- }
-}
-
-void MemoryPeakDetector::SetStaticThresholdForTesting(
- uint64_t static_threshold_bytes) {
- DCHECK_EQ(DISABLED, state_);
- static_threshold_bytes_ = static_threshold_bytes;
-}
-
-MemoryPeakDetector::MemoryPeakDetector::Config::Config()
- : Config(0, 0, false) {}
-
-MemoryPeakDetector::MemoryPeakDetector::Config::Config(
- uint32_t polling_interval_ms,
- uint32_t min_time_between_peaks_ms,
- bool enable_verbose_poll_tracing)
- : polling_interval_ms(polling_interval_ms),
- min_time_between_peaks_ms(min_time_between_peaks_ms),
- enable_verbose_poll_tracing(enable_verbose_poll_tracing) {}
-
-} // namespace trace_event
-} // namespace base
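Editor's note: for reference, the heuristic deleted above combined a static threshold (1% of physical memory, floored at 5 MiB) with a 50-sample sliding-window standard-deviation test: windows whose stddev was below 0.2% of the mean were treated as idle, and a sample more than 3.69 standard deviations above the mean (or more than the static threshold above the last dump) was flagged as a peak. Below is a self-contained restatement of that arithmetic, detached from the MemoryDumpProvider plumbing; the skip_polls_ throttling from the original is omitted for brevity.

// Standalone sketch of the removed peak-detection math. Constants and
// window size are taken from the deleted code above.
#include <algorithm>
#include <array>
#include <cstdint>

namespace {

constexpr uint32_t kSlidingWindowNumSamples = 50;

class PeakHeuristic {
 public:
  explicit PeakHeuristic(uint64_t static_threshold_bytes)
      : static_threshold_bytes_(
            std::max<uint64_t>(static_threshold_bytes, 5 * 1024 * 1024)) {}

  // Returns true if |polled_mem_bytes| looks like a peak relative to the
  // last recorded dump and to the sliding window of recent samples.
  bool OnSample(uint64_t polled_mem_bytes) {
    if (last_dump_memory_total_ == 0) {
      last_dump_memory_total_ = polled_mem_bytes;
      return false;
    }
    const int64_t diff_from_last_dump =
        static_cast<int64_t>(polled_mem_bytes) -
        static_cast<int64_t>(last_dump_memory_total_);
    bool is_peak =
        diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
    if (!is_peak)
      is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
    if (is_peak)
      last_dump_memory_total_ = polled_mem_bytes;
    return is_peak;
  }

 private:
  bool DetectPeakUsingSlidingWindowStddev(uint64_t polled_mem_bytes) {
    samples_bytes_[samples_index_] = polled_mem_bytes;
    samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
    float mean = 0;
    for (uint64_t sample : samples_bytes_) {
      if (sample == 0)
        return false;  // Not enough samples to detect peaks yet.
      mean += sample;
    }
    mean /= kSlidingWindowNumSamples;
    float variance = 0;
    for (uint64_t sample : samples_bytes_) {
      const float deviation = sample - mean;
      variance += deviation * deviation;
    }
    variance /= kSlidingWindowNumSamples;
    // Stddev below 0.2% of the mean: the process is considered inactive.
    if (variance < (mean / 500) * (mean / 500))
      return false;
    // mean + 3.69 * stddev exceeds the current sample with ~99.99%
    // probability, so anything beyond it is flagged as a peak.
    const float cur_sample_deviation = polled_mem_bytes - mean;
    return cur_sample_deviation * cur_sample_deviation >
           (3.69f * 3.69f * variance);
  }

  const uint64_t static_threshold_bytes_;
  uint64_t last_dump_memory_total_ = 0;
  std::array<uint64_t, kSlidingWindowNumSamples> samples_bytes_ = {};
  uint32_t samples_index_ = 0;
};

}  // namespace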
diff --git a/chromium/base/trace_event/memory_peak_detector.h b/chromium/base/trace_event/memory_peak_detector.h
deleted file mode 100644
index bbe205ba2e4..00000000000
--- a/chromium/base/trace_event/memory_peak_detector.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-
-namespace base {
-
-class SequencedTaskRunner;
-
-namespace trace_event {
-
-struct MemoryDumpProviderInfo;
-
-// Detects temporally local memory peaks. Peak detection is based on
-// continuously querying memory usage using MemoryDumpprovider(s) that support
-// fast polling (e.g., ProcessMetricsDumpProvider which under the hoods reads
-// /proc/PID/statm on Linux) and using a combination of:
-// - An static threshold (currently 1% of total system memory).
-// - Sliding window stddev analysis.
-// Design doc: https://goo.gl/0kOU4A .
-// This class is NOT thread-safe, the caller has to ensure linearization of
-// the calls to the public methods. In any case, the public methods do NOT have
-// to be called from the |task_runner| on which the polling tasks run.
-class BASE_EXPORT MemoryPeakDetector {
- public:
- using OnPeakDetectedCallback = RepeatingClosure;
- using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
- using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
-
- enum State {
- NOT_INITIALIZED = 0, // Before Setup()
- DISABLED, // Before Start() or after Stop().
- ENABLED, // After Start() but no dump_providers_ are available.
- RUNNING // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
- };
-
- // Peak detector configuration, passed to Start().
- struct BASE_EXPORT Config {
- Config();
- Config(uint32_t polling_interval_ms,
- uint32_t min_time_between_peaks_ms,
- bool enable_verbose_poll_tracing);
-
- // The rate at which memory will be polled. Polls will happen on the task
- // runner passed to Setup().
- uint32_t polling_interval_ms;
-
- // Two consecutive peak detection callbacks will happen at least
- // |min_time_between_peaks_ms| apart from each other.
- uint32_t min_time_between_peaks_ms;
-
- // When enabled causes a TRACE_COUNTER event to be injected in the trace
- // for each poll (if tracing is enabled).
- bool enable_verbose_poll_tracing;
- };
-
- static MemoryPeakDetector* GetInstance();
-
- // Configures the peak detector, binding the polling tasks on the given
- // thread. Setup() can be called several times, provided that: (1) Stop()
- // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
- // remains the same.
- // GetDumpProvidersFunction: is the function that will be invoked to get
- // an updated list of polling-capable dump providers. This is really just
- // MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
- // indirection allows easier testing.
- // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
- // be periodically called.
- // OnPeakDetectedCallback: a callback that will be invoked on the
- // given task runner when a memory peak is detected.
- void Setup(const GetDumpProvidersFunction&,
- const scoped_refptr<SequencedTaskRunner>&,
- const OnPeakDetectedCallback&);
-
- // Releases the |task_runner_| and the bound callbacks.
- void TearDown();
-
- // This posts a task onto the passed task runner which refreshes the list of
- // dump providers via the GetDumpProvidersFunction. If at least one dump
- // provider is available, this starts immediately polling on the task runner.
- // If not, the detector remains in the ENABLED state and will start polling
- // automatically (i.e. without requiring another call to Start()) on the
- // next call to NotifyMemoryDumpProvidersChanged().
- void Start(Config);
-
- // Stops the polling on the task runner (if was active at all). This doesn't
- // wait for the task runner to drain pending tasks, so it is possible that
- // a polling will happen concurrently (or in the immediate future) with the
- // Stop() call. It is responsibility of the caller to drain or synchronize
- // with the task runner.
- void Stop();
-
- // If Start()-ed, prevents that a peak callback is triggered before the next
- // |min_time_between_peaks_ms|. No-op if the peak detector is not enabled.
- void Throttle();
-
- // Used by MemoryDumpManager to notify that the list of polling-capable dump
- // providers has changed. The peak detector will reload the list on the next
- // polling task. This function can be called before Setup(), in which
- // case will be just a no-op.
- void NotifyMemoryDumpProvidersChanged();
-
- void SetStaticThresholdForTesting(uint64_t static_threshold_bytes);
-
- private:
- friend class MemoryPeakDetectorTest;
-
- static constexpr uint32_t kSlidingWindowNumSamples = 50;
-
- MemoryPeakDetector();
- ~MemoryPeakDetector();
-
- // All these methods are always called on the |task_runner_|.
- void StartInternal(Config);
- void StopInternal();
- void TearDownInternal();
- void ReloadDumpProvidersAndStartPollingIfNeeded();
- void PollMemoryAndDetectPeak(uint32_t expected_generation);
- bool DetectPeakUsingSlidingWindowStddev(uint64_t last_sample_bytes);
- void ResetPollHistory(bool keep_last_sample = false);
-
- // It is safe to call these testing methods only on the |task_runner_|.
- State state_for_testing() const { return state_; }
- uint32_t poll_tasks_count_for_testing() const {
- return poll_tasks_count_for_testing_;
- }
-
- // The task runner where all the internal calls are posted onto. This field
- // must be NOT be accessed by the tasks posted on the |task_runner_| because
- // there might still be outstanding tasks on the |task_runner_| while this
- // refptr is reset. This can only be safely accessed by the public methods
- // above, which the client of this class is supposed to call sequentially.
- scoped_refptr<SequencedTaskRunner> task_runner_;
-
- // After the Setup() call, the fields below, must be accessed only from
- // the |task_runner_|.
-
- // Bound function to get an updated list of polling-capable dump providers.
- GetDumpProvidersFunction get_dump_providers_function_;
-
- // The callback to invoke when peaks are detected.
- OnPeakDetectedCallback on_peak_detected_callback_;
-
- // List of polling-aware dump providers to invoke upon each poll.
- DumpProvidersList dump_providers_;
-
- // The generation is incremented every time the |state_| is changed and causes
- // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
- // most recent |generation_|. This allows to drop on the floor outstanding
- // PostDelayedTask that refer to an old sequence that was later Stop()-ed or
- // disabled because of NotifyMemoryDumpProvidersChanged().
- uint32_t generation_;
-
- State state_;
-
- // Config passed to Start(), only valid when |state_| = {ENABLED, RUNNING}.
- Config config_;
-
- uint64_t static_threshold_bytes_;
- uint32_t skip_polls_;
- uint64_t last_dump_memory_total_;
- uint64_t samples_bytes_[kSlidingWindowNumSamples];
- uint32_t samples_index_;
- uint32_t poll_tasks_count_for_testing_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
diff --git a/chromium/base/trace_event/memory_peak_detector_unittest.cc b/chromium/base/trace_event/memory_peak_detector_unittest.cc
deleted file mode 100644
index bc10c80d921..00000000000
--- a/chromium/base/trace_event/memory_peak_detector_unittest.cc
+++ /dev/null
@@ -1,564 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::Invoke;
-using ::testing::Return;
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-const TimeDelta kMs = TimeDelta::FromMilliseconds(1);
-const MemoryPeakDetector::Config kConfigNoCallbacks(
- 1 /* polling_interval_ms */,
- 60000 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
-
-class MockMemoryDumpProvider : public MemoryDumpProvider {
- public:
- bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
- NOTREACHED();
- return true;
- }
-
- MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
-};
-
-// Wrapper to use gmock on a callback.
-struct OnPeakDetectedWrapper {
- MOCK_METHOD0(OnPeak, void());
-};
-
-} // namespace
-
-class MemoryPeakDetectorTest : public testing::Test {
- public:
- struct FriendDeleter {
- void operator()(MemoryPeakDetector* inst) { delete inst; }
- };
-
- MemoryPeakDetectorTest() : testing::Test() {}
- static const uint64_t kSlidingWindowNumSamples =
- MemoryPeakDetector::kSlidingWindowNumSamples;
-
- std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
- return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
- new MemoryPeakDetector());
- }
-
- void RestartThreadAndReinitializePeakDetector() {
- bg_thread_.reset(new Thread("Peak Detector Test Thread"));
- bg_thread_->Start();
- peak_detector_ = NewInstance();
- peak_detector_->Setup(
- Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
- bg_thread_->task_runner(),
- Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
- }
-
- void SetUp() override {
- get_mdp_call_count_ = 0;
- RestartThreadAndReinitializePeakDetector();
- }
-
- void TearDown() override {
- peak_detector_->TearDown();
- bg_thread_->FlushForTesting();
- EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
- bg_thread_.reset();
- dump_providers_.clear();
- }
-
- // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
- // the result on the current thread.
- MemoryPeakDetector::State GetPeakDetectorState() {
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
- auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
- MemoryPeakDetector::State* res) {
- *res = peak_detector->state_for_testing();
- evt->Signal();
- };
- bg_thread_->task_runner()->PostTask(
- FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
- Unretained(&evt), Unretained(&res)));
- evt.Wait();
- return res;
- }
-
- // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
- // and returns the result on the current thread.
- uint32_t GetNumPollingTasksRan() {
- uint32_t res = 0;
- auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
- uint32_t* res) {
- *res = peak_detector->poll_tasks_count_for_testing();
- evt->Signal();
- };
-
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- bg_thread_->task_runner()->PostTask(
- FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
- Unretained(&evt), Unretained(&res)));
- evt.Wait();
- return res;
- }
-
- // Runs the peak detector with a mock MDP with the given
- // |config|. The mock MDP will invoke the |poll_function| on any call to
- // PollFastMemoryTotal(), until |num_samples| have been polled.
- // It returns the number of peaks detected.
- uint32_t RunWithCustomPollFunction(
- MemoryPeakDetector::Config config,
- uint32_t num_samples,
- RepeatingCallback<uint64_t(uint32_t)> poll_function) {
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- dump_providers_.push_back(mdp);
- uint32_t cur_sample_idx = 0;
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke(
- [&cur_sample_idx, &evt, poll_function, num_samples](uint64_t* mem) {
- if (cur_sample_idx >= num_samples) {
- *mem = 1;
- evt.Signal();
- } else {
- *mem = poll_function.Run(cur_sample_idx++);
- }
- }));
-
- uint32_t num_peaks = 0;
- EXPECT_CALL(on_peak_callback_, OnPeak())
- .WillRepeatedly(Invoke([&num_peaks] { num_peaks++; }));
- peak_detector_->Start(config);
- evt.Wait(); // Wait for |num_samples| invocations of PollFastMemoryTotal().
- peak_detector_->Stop();
- EXPECT_EQ(num_samples, cur_sample_idx);
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- return num_peaks;
- }
-
- // Called on the |bg_thread_|.
- void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
- get_mdp_call_count_++;
- *mdps = dump_providers_;
- }
-
- uint32_t GetNumGetDumpProvidersCalls() {
- bg_thread_->FlushForTesting();
- return get_mdp_call_count_;
- }
-
- scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
- std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
- MemoryDumpProvider::Options opt;
- opt.is_fast_polling_supported = true;
- scoped_refptr<MemoryDumpProviderInfo> mdp_info(new MemoryDumpProviderInfo(
- mdp.get(), "Mock MDP", nullptr, opt,
- false /* whitelisted_for_background_mode */));
-
- // The |mdp| instance will be destroyed together with the |mdp_info|.
- mdp_info->owned_dump_provider = std::move(mdp);
- return mdp_info;
- }
-
- static MockMemoryDumpProvider& GetMockMDP(
- const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
- return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
- }
-
- static uint64_t PollFunctionThatCausesPeakViaStdDev(uint32_t sample_idx) {
- // Start with a baseline of 50 MB.
- if (sample_idx < kSlidingWindowNumSamples)
- return 50000 + (sample_idx % 3) * 100;
-
- // Then 10 samples around 80 MB
- if (sample_idx < 10 + kSlidingWindowNumSamples)
- return 80000 + (sample_idx % 3) * 200;
-
-    // Then back to 60 MB.
- if (sample_idx < 2 * kSlidingWindowNumSamples)
- return 60000 + (sample_idx % 3) * 100;
-
- // Then 20 samples around 120 MB.
- if (sample_idx < 20 + 2 * kSlidingWindowNumSamples)
- return 120000 + (sample_idx % 3) * 200;
-
-    // Then back to idle, around 50 MB, until the end.
- return 50000 + (sample_idx % 3) * 100;
- }
-
- protected:
- MemoryPeakDetector::DumpProvidersList dump_providers_;
- uint32_t get_mdp_call_count_;
- std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
- std::unique_ptr<Thread> bg_thread_;
- OnPeakDetectedWrapper on_peak_callback_;
-};
-
-const uint64_t MemoryPeakDetectorTest::kSlidingWindowNumSamples;
-
-TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, ThrottleAndNotifyBeforeInitialize) {
- peak_detector_->TearDown();
-
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
- dump_providers_.push_back(mdp);
- peak_detector_->Throttle();
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
- RestartThreadAndReinitializePeakDetector();
-
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
- evt.Wait(); // Wait for a PollFastMemoryTotal() call.
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
- EXPECT_GE(GetNumPollingTasksRan(), 1u);
-}
-
-TEST_F(MemoryPeakDetectorTest, DoubleStop) {
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
- EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
- EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
- dump_providers_.push_back(mdp);
-
- peak_detector_->Start(kConfigNoCallbacks);
- evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
- EXPECT_GT(GetNumPollingTasksRan(), 0u);
-}
-
-TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
- dump_providers_.push_back(mdp);
-
- for (int i = 0; i < 5; ++i) {
- evt.Reset();
- peak_detector_->Start(kConfigNoCallbacks);
- evt.Wait(); // Wait for a PollFastMemoryTotal() call.
- // Check that calling TearDown implicitly does a Stop().
- peak_detector_->TearDown();
-
- // Reinitialize and re-bind to a new task runner.
- RestartThreadAndReinitializePeakDetector();
- }
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
- EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-
- // Check that no poll tasks are posted before any dump provider is registered.
- PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
- EXPECT_EQ(0u, GetNumPollingTasksRan());
-
-  // Register the MDP after Start() has been issued and expect that the
- // PeakDetector transitions ENABLED -> RUNNING on the next
- // NotifyMemoryDumpProvidersChanged() call.
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
- dump_providers_.push_back(mdp);
- peak_detector_->NotifyMemoryDumpProvidersChanged();
-
- evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
- EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
-
-  // Now simulate the unregistration and expect that the PeakDetector
-  // transitions back to ENABLED.
- dump_providers_.clear();
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
- EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
- uint32_t num_poll_tasks = GetNumPollingTasksRan();
- EXPECT_GT(num_poll_tasks, 0u);
-
- // At this point, no more polling tasks should be posted.
- PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-}
-
-// Test that a sequence of Start()/Stop() back-to-back doesn't end up creating
-// several outstanding timer tasks and instead respects the polling_interval_ms.
-TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
- WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
- dump_providers_.push_back(mdp);
- const uint32_t kNumPolls = 20;
- uint32_t polls_done = 0;
- EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
- if (++polls_done == kNumPolls)
- evt.Signal();
- }));
-
- const TimeTicks tstart = TimeTicks::Now();
- for (int i = 0; i < 5; i++) {
- peak_detector_->Start(kConfigNoCallbacks);
- peak_detector_->Stop();
- }
-
- bg_thread_->task_runner()->PostTask(
- FROM_HERE, base::BindOnce([](uint32_t* polls_done) { *polls_done = 0; },
- &polls_done));
-
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
- evt.Wait(); // Wait for kNumPolls.
- const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
-
- EXPECT_GE(time_ms, (kNumPolls - 1) * kConfigNoCallbacks.polling_interval_ms);
- peak_detector_->Stop();
-}
-
-TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
- WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
- scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
- EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
- EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
- .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
-
- // Register only one MDP and start the detector.
- dump_providers_.push_back(mdp1);
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
- // Wait for one poll task and then register also the other one.
- evt1.Wait();
- dump_providers_.push_back(mdp2);
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- evt2.Wait();
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
- // Now unregister the first MDP and check that everything is still running.
- dump_providers_.erase(dump_providers_.begin());
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
- // Now unregister both and check that the detector goes to idle.
- dump_providers_.clear();
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
- // Now re-register both and check that the detector re-activates posting
- // new polling tasks.
- uint32_t num_poll_tasks = GetNumPollingTasksRan();
- evt1.Reset();
- evt2.Reset();
- dump_providers_.push_back(mdp1);
- dump_providers_.push_back(mdp2);
- peak_detector_->NotifyMemoryDumpProvidersChanged();
- evt1.Wait();
- evt2.Wait();
- EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
- EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
-
- // Stop everything, tear down the MDPs, restart the detector and check that
-  // it doesn't accidentally try to re-access them.
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- dump_providers_.clear();
- mdp1 = nullptr;
- mdp2 = nullptr;
-
- num_poll_tasks = GetNumPollingTasksRan();
- peak_detector_->Start(kConfigNoCallbacks);
- EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
- PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
-
- peak_detector_->Stop();
- EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
- EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-
- EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
-}
-
-// Tests the behavior of the static threshold detector, which is supposed to
-// detect a peak whenever an increase >= threshold is detected.
-TEST_F(MemoryPeakDetectorTest, StaticThreshold) {
- const uint32_t kNumSamples = 2 * kSlidingWindowNumSamples;
- constexpr uint32_t kNumSamplesPerStep = 10;
- constexpr uint64_t kThreshold = 1000000;
- peak_detector_->SetStaticThresholdForTesting(kThreshold);
- const MemoryPeakDetector::Config kConfig(
- 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
-
-  // The mocked PollFastMemoryTotal() will return a step function, e.g.
-  // (2, 2, 2, 4, 4, 4, ...) in units of |kThreshold|, where each step increase
-  // is 2x the threshold, in order to trigger only the static threshold logic.
- auto poll_fn = Bind(
- [](const uint32_t kNumSamplesPerStep, const uint64_t kThreshold,
- uint32_t sample_idx) -> uint64_t {
- return (1 + sample_idx / kNumSamplesPerStep) * 2 * kThreshold;
- },
- kNumSamplesPerStep, kThreshold);
- uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
- EXPECT_EQ(kNumSamples / kNumSamplesPerStep - 1, num_peaks);
-}
-
-// Checks the throttling logic of Config's |min_time_between_peaks_ms|.
-TEST_F(MemoryPeakDetectorTest, PeakCallbackThrottling) {
- const size_t kNumSamples = 2 * kSlidingWindowNumSamples;
- constexpr uint64_t kThreshold = 1000000;
- peak_detector_->SetStaticThresholdForTesting(kThreshold);
- const MemoryPeakDetector::Config kConfig(
- 1 /* polling_interval_ms */, 4 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
-
- // Each mock value returned is N * 2 * threshold, so all of them would be
- // eligible to be a peak if throttling wasn't enabled.
- auto poll_fn = Bind(
- [](uint64_t kThreshold, uint32_t sample_idx) -> uint64_t {
- return (sample_idx + 1) * 2 * kThreshold;
- },
- kThreshold);
- uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
- const uint32_t kExpectedThrottlingRate =
- kConfig.min_time_between_peaks_ms / kConfig.polling_interval_ms;
- EXPECT_LT(num_peaks, kNumSamples / kExpectedThrottlingRate);
-}
-
-TEST_F(MemoryPeakDetectorTest, StdDev) {
- // Set the threshold to some arbitrarily high value, so that the static
- // threshold logic is not hit in this test.
- constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
- peak_detector_->SetStaticThresholdForTesting(kThreshold);
- const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
- const MemoryPeakDetector::Config kConfig(
- 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
-
- auto poll_fn = Bind(&PollFunctionThatCausesPeakViaStdDev);
- uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
- EXPECT_EQ(2u, num_peaks); // 80 MB, 120 MB.
-}
-
-// Tests that Throttle() actually holds back peak notifications.
-TEST_F(MemoryPeakDetectorTest, Throttle) {
- constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
- const uint32_t kNumSamples = 3 * kSlidingWindowNumSamples;
- peak_detector_->SetStaticThresholdForTesting(kThreshold);
- const MemoryPeakDetector::Config kConfig(
- 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
-
- auto poll_fn = Bind(
- [](MemoryPeakDetector* peak_detector, uint32_t sample_idx) -> uint64_t {
- if (sample_idx % 20 == 20 - 1)
- peak_detector->Throttle();
- return PollFunctionThatCausesPeakViaStdDev(sample_idx);
- },
- Unretained(&*peak_detector_));
- uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
- EXPECT_EQ(0u, num_peaks);
-}
-
-// Tests that the sliding-window stddev state is not carried over through
-// Stop() -> Start() sequences.
-TEST_F(MemoryPeakDetectorTest, RestartClearsState) {
- constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
- peak_detector_->SetStaticThresholdForTesting(kThreshold);
- const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
- const MemoryPeakDetector::Config kConfig(
- 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
- false /* enable_verbose_poll_tracing */
- );
- auto poll_fn = Bind(
- [](MemoryPeakDetector* peak_detector,
- const uint32_t kSlidingWindowNumSamples,
- MemoryPeakDetector::Config kConfig, uint32_t sample_idx) -> uint64_t {
- if (sample_idx % kSlidingWindowNumSamples ==
- kSlidingWindowNumSamples - 1) {
- peak_detector->Stop();
- peak_detector->Start(kConfig);
- }
- return PollFunctionThatCausesPeakViaStdDev(sample_idx);
- },
- Unretained(&*peak_detector_), kSlidingWindowNumSamples, kConfig);
- uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
- EXPECT_EQ(0u, num_peaks);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 744257899eb..362641c400d 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -12,8 +12,6 @@
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
-#include "base/trace_event/heap_profiler_heap_dump_writer.h"
-#include "base/trace_event/heap_profiler_serialization_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/unguessable_token.h"
@@ -230,12 +228,8 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
ProcessMemoryDump::ProcessMemoryDump(
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state,
const MemoryDumpArgs& dump_args)
: process_token_(GetTokenForCurrentProcess()),
- heap_profiler_serialization_state_(
- std::move(heap_profiler_serialization_state)),
dump_args_(dump_args) {}
ProcessMemoryDump::~ProcessMemoryDump() = default;
@@ -278,8 +272,6 @@ MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
auto it = allocator_dumps_.find(absolute_name);
if (it != allocator_dumps_.end())
return it->second.get();
- if (black_hole_mad_)
- return black_hole_mad_.get();
return nullptr;
}
@@ -324,15 +316,6 @@ void ProcessMemoryDump::DumpHeapUsage(
metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name) {
-  // The heap profiler serialization state can be null here if the heap
-  // profiler was enabled while a process dump was already in progress.
- if (heap_profiler_serialization_state() && !metrics_by_context.empty()) {
- DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
- std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
- metrics_by_context, *heap_profiler_serialization_state());
- heap_dumps_[allocator_name] = std::move(heap_dump);
- }
-
std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
allocator_name);
overhead.DumpInto(base_name.c_str(), this);
@@ -366,7 +349,6 @@ void ProcessMemoryDump::SetAllEdgesForSerialization(
void ProcessMemoryDump::Clear() {
allocator_dumps_.clear();
allocator_dumps_edges_.clear();
- heap_dumps_.clear();
}
void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
@@ -380,12 +362,6 @@ void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
other->allocator_dumps_edges_.end());
other->allocator_dumps_edges_.clear();
-
- for (auto& it : other->heap_dumps_) {
- DCHECK_EQ(0ul, heap_dumps_.count(it.first));
- heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
- }
- other->heap_dumps_.clear();
}
void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
@@ -409,16 +385,6 @@ void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
value->EndArray();
}
-void ProcessMemoryDump::SerializeHeapProfilerDumpsInto(
- TracedValue* value) const {
- if (heap_dumps_.size() == 0)
- return;
- value->BeginDictionary("heaps");
- for (const auto& name_and_dump : heap_dumps_)
- value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
- value->EndDictionary(); // "heaps"
-}
-
void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance) {
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index a732a2623c4..e2457b73898 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -14,7 +14,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_request_args.h"
@@ -33,7 +33,6 @@ class UnguessableToken;
namespace trace_event {
-class HeapProfilerSerializationState;
class TracedValue;
 // ProcessMemoryDump is a strongly typed container which holds the dumps
@@ -55,8 +54,6 @@ class BASE_EXPORT ProcessMemoryDump {
using AllocatorDumpsMap =
std::map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
- using HeapDumpsMap = std::map<std::string, std::unique_ptr<TracedValue>>;
-
// Stores allocator dump edges indexed by source allocator dump GUID.
using AllocatorDumpEdgesMap =
std::map<MemoryAllocatorDumpGuid, MemoryAllocatorDumpEdge>;
@@ -81,9 +78,7 @@ class BASE_EXPORT ProcessMemoryDump {
size_t mapped_size);
#endif
- ProcessMemoryDump(scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state,
- const MemoryDumpArgs& dump_args);
+ explicit ProcessMemoryDump(const MemoryDumpArgs& dump_args);
ProcessMemoryDump(ProcessMemoryDump&&);
~ProcessMemoryDump();
@@ -215,11 +210,6 @@ class BASE_EXPORT ProcessMemoryDump {
void AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name);
- const scoped_refptr<HeapProfilerSerializationState>&
- heap_profiler_serialization_state() const {
- return heap_profiler_serialization_state_;
- }
-
// Removes all the MemoryAllocatorDump(s) contained in this instance. This
// ProcessMemoryDump can be safely reused as if it was new once this returns.
void Clear();
@@ -236,11 +226,6 @@ class BASE_EXPORT ProcessMemoryDump {
// dumps.
void SerializeAllocatorDumpsInto(TracedValue* value) const;
- // Populate the traced value with information about the heap profiler.
- void SerializeHeapProfilerDumpsInto(TracedValue* value) const;
-
- const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
-
const MemoryDumpArgs& dump_args() const { return dump_args_; }
private:
@@ -274,11 +259,6 @@ class BASE_EXPORT ProcessMemoryDump {
UnguessableToken process_token_;
AllocatorDumpsMap allocator_dumps_;
- HeapDumpsMap heap_dumps_;
-
- // State shared among all PMDs instances created in a given trace session.
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state_;
// Keeps track of relationships between MemoryAllocatorDump(s).
AllocatorDumpEdgesMap allocator_dumps_edges_;
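With the heap-profiler serialization state removed, a ProcessMemoryDump is now constructed from the dump args alone. A minimal sketch of the simplified construction (illustrative only, not part of the patch; it assumes the existing MemoryAllocatorDump::AddScalar() API and the kNameSize/kUnitsBytes constants from base/trace_event/memory_allocator_dump.h):

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"

void BuildSimpleDump() {
  base::trace_event::MemoryDumpArgs args = {
      base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
  // The constructor no longer takes a HeapProfilerSerializationState.
  base::trace_event::ProcessMemoryDump pmd(args);
  auto* mad = pmd.CreateAllocatorDump("example/allocated_objects");
  mad->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                 base::trace_event::MemoryAllocatorDump::kUnitsBytes, 1024);
}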
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index f1209ca5d25..d5f771d81bf 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -38,11 +38,6 @@ const char* const kTestDumpNameWhitelist[] = {
"Whitelisted/TestName", "Whitelisted/TestName_0x?",
"Whitelisted/0x?/TestName", "Whitelisted/0x?", nullptr};
-TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
- auto it = pmd.heap_dumps().find(name);
- return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
-}
-
void* Map(size_t size) {
#if defined(OS_WIN)
return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
@@ -66,12 +61,7 @@ void Unmap(void* addr, size_t size) {
} // namespace
TEST(ProcessMemoryDumpTest, MoveConstructor) {
- auto heap_state = MakeRefCounted<HeapProfilerSerializationState>();
- heap_state->SetStackFrameDeduplicator(
- std::make_unique<StackFrameDeduplicator>());
- heap_state->SetTypeNameDeduplicator(std::make_unique<TypeNameDeduplicator>());
-
- ProcessMemoryDump pmd1 = ProcessMemoryDump(heap_state, kDetailedDumpArgs);
+ ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
pmd1.CreateAllocatorDump("mad1");
pmd1.CreateAllocatorDump("mad2");
pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
@@ -84,27 +74,20 @@ TEST(ProcessMemoryDumpTest, MoveConstructor) {
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
pmd2.dump_args().level_of_detail);
EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
- EXPECT_EQ(heap_state.get(), pmd2.heap_profiler_serialization_state().get());
// Check that calling serialization routines doesn't cause a crash.
auto traced_value = std::make_unique<TracedValue>();
pmd2.SerializeAllocatorDumpsInto(traced_value.get());
- pmd2.SerializeHeapProfilerDumpsInto(traced_value.get());
}
TEST(ProcessMemoryDumpTest, MoveAssignment) {
- auto heap_state = MakeRefCounted<HeapProfilerSerializationState>();
- heap_state->SetStackFrameDeduplicator(
- std::make_unique<StackFrameDeduplicator>());
- heap_state->SetTypeNameDeduplicator(std::make_unique<TypeNameDeduplicator>());
-
- ProcessMemoryDump pmd1 = ProcessMemoryDump(heap_state, kDetailedDumpArgs);
+ ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
pmd1.CreateAllocatorDump("mad1");
pmd1.CreateAllocatorDump("mad2");
pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
MemoryAllocatorDumpGuid(4242));
- ProcessMemoryDump pmd2(nullptr, {MemoryDumpLevelOfDetail::BACKGROUND});
+ ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::BACKGROUND});
pmd2.CreateAllocatorDump("malloc");
pmd2 = std::move(pmd1);
@@ -114,17 +97,15 @@ TEST(ProcessMemoryDumpTest, MoveAssignment) {
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
pmd2.dump_args().level_of_detail);
EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
- EXPECT_EQ(heap_state.get(), pmd2.heap_profiler_serialization_state().get());
// Check that calling serialization routines doesn't cause a crash.
auto traced_value = std::make_unique<TracedValue>();
pmd2.SerializeAllocatorDumpsInto(traced_value.get());
- pmd2.SerializeHeapProfilerDumpsInto(traced_value.get());
}
TEST(ProcessMemoryDumpTest, Clear) {
std::unique_ptr<ProcessMemoryDump> pmd1(
- new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+ new ProcessMemoryDump(kDetailedDumpArgs));
pmd1->CreateAllocatorDump("mad1");
pmd1->CreateAllocatorDump("mad2");
ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -148,7 +129,6 @@ TEST(ProcessMemoryDumpTest, Clear) {
// Check that calling serialization routines doesn't cause a crash.
auto traced_value = std::make_unique<TracedValue>();
pmd1->SerializeAllocatorDumpsInto(traced_value.get());
- pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
// Check that the pmd can be reused and behaves as expected.
auto* mad1 = pmd1->CreateAllocatorDump("mad1");
@@ -167,7 +147,6 @@ TEST(ProcessMemoryDumpTest, Clear) {
traced_value.reset(new TracedValue);
pmd1->SerializeAllocatorDumpsInto(traced_value.get());
- pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
pmd1.reset();
}
@@ -178,22 +157,16 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
metrics_by_context[AllocationContext()] = {1, 1};
TraceEventMemoryOverhead overhead;
- scoped_refptr<HeapProfilerSerializationState>
- heap_profiler_serialization_state = new HeapProfilerSerializationState;
- heap_profiler_serialization_state->SetStackFrameDeduplicator(
- WrapUnique(new StackFrameDeduplicator));
- heap_profiler_serialization_state->SetTypeNameDeduplicator(
- WrapUnique(new TypeNameDeduplicator));
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(
- heap_profiler_serialization_state.get(), kDetailedDumpArgs));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(kDetailedDumpArgs));
auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
- std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(
- heap_profiler_serialization_state.get(), kDetailedDumpArgs));
+ std::unique_ptr<ProcessMemoryDump> pmd2(
+ new ProcessMemoryDump(kDetailedDumpArgs));
auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
@@ -211,7 +184,6 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
// Make sure that pmd2 is empty but still usable after it has been emptied.
ASSERT_TRUE(pmd2->allocator_dumps().empty());
ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
- ASSERT_TRUE(pmd2->heap_dumps().empty());
pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
ASSERT_EQ(1u, pmd2->allocator_dumps().size());
ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
@@ -220,7 +192,6 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
// Check that calling serialization routines doesn't cause a crash.
pmd2->SerializeAllocatorDumpsInto(traced_value.get());
- pmd2->SerializeHeapProfilerDumpsInto(traced_value.get());
// Free the |pmd2| to check that the memory ownership of the two MAD(s)
// has been transferred to |pmd1|.
@@ -236,23 +207,17 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
- ASSERT_EQ(4u, pmd1->heap_dumps().size());
- ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
- ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
- ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
- ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
// Check that calling serialization routines doesn't cause a crash.
traced_value.reset(new TracedValue);
pmd1->SerializeAllocatorDumpsInto(traced_value.get());
- pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
pmd1.reset();
}
TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+ new ProcessMemoryDump(kDetailedDumpArgs));
auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
@@ -318,7 +283,7 @@ TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
TEST(ProcessMemoryDumpTest, Suballocations) {
std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+ new ProcessMemoryDump(kDetailedDumpArgs));
const std::string allocator_dump_name = "fakealloc/allocated_objects";
pmd->CreateAllocatorDump(allocator_dump_name);
@@ -357,14 +322,13 @@ TEST(ProcessMemoryDumpTest, Suballocations) {
// Check that calling serialization routines doesn't cause a crash.
std::unique_ptr<TracedValue> traced_value(new TracedValue);
pmd->SerializeAllocatorDumpsInto(traced_value.get());
- pmd->SerializeHeapProfilerDumpsInto(traced_value.get());
pmd.reset();
}
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+ new ProcessMemoryDump(kDetailedDumpArgs));
MemoryAllocatorDumpGuid shared_mad_guid(1);
auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
@@ -389,7 +353,7 @@ TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+ new ProcessMemoryDump(kDetailedDumpArgs));
const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
pmd->allocator_dumps_edges();
@@ -417,11 +381,15 @@ TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
std::unique_ptr<ProcessMemoryDump> pmd(
- new ProcessMemoryDump(nullptr, background_args));
+ new ProcessMemoryDump(background_args));
ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+  // GetAllocatorDump() returns null for dumps that have not been created.
+ EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_EQ(nullptr, pmd->GetAllocatorDump("Whitelisted/TestName"));
+
// Invalid dump names.
EXPECT_EQ(black_hole_mad,
pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
@@ -455,7 +423,7 @@ TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
// GetAllocatorDump is consistent.
- EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
// Test whitelisted entries.
@@ -480,21 +448,21 @@ TEST(ProcessMemoryDumpTest, GuidsTest) {
const auto process_token_one = UnguessableToken::Create();
const auto process_token_two = UnguessableToken::Create();
- ProcessMemoryDump pmd1(nullptr, dump_args);
+ ProcessMemoryDump pmd1(dump_args);
pmd1.set_process_token_for_testing(process_token_one);
MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");
- ProcessMemoryDump pmd2(nullptr, dump_args);
+ ProcessMemoryDump pmd2(dump_args);
pmd2.set_process_token_for_testing(process_token_one);
MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");
// If we don't pass the argument we get a random PMD:
- ProcessMemoryDump pmd3(nullptr, dump_args);
+ ProcessMemoryDump pmd3(dump_args);
MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");
// PMD's for different processes produce different GUIDs even for the same
// names:
- ProcessMemoryDump pmd4(nullptr, dump_args);
+ ProcessMemoryDump pmd4(dump_args);
pmd4.set_process_token_for_testing(process_token_two);
MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");
@@ -508,7 +476,13 @@ TEST(ProcessMemoryDumpTest, GuidsTest) {
}
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
-TEST(ProcessMemoryDumpTest, CountResidentBytes) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
+#define MAYBE_CountResidentBytes DISABLED_CountResidentBytes
+#else
+#define MAYBE_CountResidentBytes CountResidentBytes
+#endif
+TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
 // Allocate a few pages of dirty memory and check if they are resident.
@@ -529,7 +503,14 @@ TEST(ProcessMemoryDumpTest, CountResidentBytes) {
Unmap(memory2, kVeryLargeMemorySize);
}
-TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
+#define MAYBE_CountResidentBytesInSharedMemory \
+ DISABLED_CountResidentBytesInSharedMemory
+#else
+#define MAYBE_CountResidentBytesInSharedMemory CountResidentBytesInSharedMemory
+#endif
+TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytesInSharedMemory) {
#if defined(OS_IOS)
// TODO(crbug.com/748410): Reenable this test.
if (!base::ios::IsRunningOnIOS10OrLater()) {
diff --git a/chromium/base/trace_event/trace_category_unittest.cc b/chromium/base/trace_event/trace_category_unittest.cc
index 964064ebf9b..1370f5e9064 100644
--- a/chromium/base/trace_event/trace_category_unittest.cc
+++ b/chromium/base/trace_event/trace_category_unittest.cc
@@ -13,6 +13,7 @@
#include "base/threading/thread.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/trace_category.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -119,7 +120,13 @@ TEST_F(TraceCategoryTest, Basic) {
// Tries to cover the case of multiple threads creating the same category
// simultaneously. Should never end up with distinct entries with the same name.
-TEST_F(TraceCategoryTest, ThreadRaces) {
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/738275): This is flaky on Fuchsia.
+#define MAYBE_ThreadRaces DISABLED_ThreadRaces
+#else
+#define MAYBE_ThreadRaces ThreadRaces
+#endif
+TEST_F(TraceCategoryTest, MAYBE_ThreadRaces) {
const int kNumThreads = 32;
std::unique_ptr<Thread> threads[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 624a29c47af..9d6a9d48744 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -51,6 +51,9 @@ const char kEventFiltersParam[] = "event_filters";
const char kFilterPredicateParam[] = "filter_predicate";
const char kFilterArgsParam[] = "filter_args";
+// String parameter used to parse process filter.
+const char kIncludedProcessesParam[] = "included_process_ids";
+
class ConvertableTraceConfigToTraceFormat
: public base::trace_event::ConvertableToTraceFormat {
public:
@@ -115,6 +118,56 @@ void TraceConfig::MemoryDumpConfig::Merge(
config.heap_profiler_options.breakdown_threshold_bytes);
}
+TraceConfig::ProcessFilterConfig::ProcessFilterConfig() = default;
+
+TraceConfig::ProcessFilterConfig::ProcessFilterConfig(
+ const ProcessFilterConfig& other) = default;
+
+TraceConfig::ProcessFilterConfig::ProcessFilterConfig(
+ const std::unordered_set<base::ProcessId>& included_process_ids)
+ : included_process_ids_(included_process_ids) {}
+
+TraceConfig::ProcessFilterConfig::~ProcessFilterConfig() = default;
+
+void TraceConfig::ProcessFilterConfig::Clear() {
+ included_process_ids_.clear();
+}
+
+void TraceConfig::ProcessFilterConfig::Merge(
+ const ProcessFilterConfig& config) {
+ included_process_ids_.insert(config.included_process_ids_.begin(),
+ config.included_process_ids_.end());
+}
+
+void TraceConfig::ProcessFilterConfig::InitializeFromConfigDict(
+ const base::DictionaryValue& dict) {
+ included_process_ids_.clear();
+ const Value* value =
+ dict.FindKeyOfType(kIncludedProcessesParam, Value::Type::LIST);
+ if (!value)
+ return;
+ for (auto& pid_value : value->GetList()) {
+ if (pid_value.is_int())
+ included_process_ids_.insert(pid_value.GetInt());
+ }
+}
+
+void TraceConfig::ProcessFilterConfig::ToDict(DictionaryValue* dict) const {
+ if (included_process_ids_.empty())
+ return;
+ Value* list = dict->SetKey(kIncludedProcessesParam, Value(Value::Type::LIST));
+ std::set<base::ProcessId> ordered_set(included_process_ids_.begin(),
+ included_process_ids_.end());
+ for (auto process_id : ordered_set)
+ list->GetList().emplace_back(static_cast<int>(process_id));
+}
+
+bool TraceConfig::ProcessFilterConfig::IsEnabled(
+ base::ProcessId process_id) const {
+ return included_process_ids_.empty() ||
+ included_process_ids_.count(process_id);
+}
+
TraceConfig::EventFilterConfig::EventFilterConfig(
const std::string& predicate_name)
: predicate_name_(predicate_name) {}
@@ -237,6 +290,7 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
category_filter_ = rhs.category_filter_;
+ process_filter_config_ = rhs.process_filter_config_;
memory_dump_config_ = rhs.memory_dump_config_;
event_filters_ = rhs.event_filters_;
return *this;
@@ -274,8 +328,8 @@ void TraceConfig::Merge(const TraceConfig& config) {
}
category_filter_.Merge(config.category_filter_);
-
memory_dump_config_.Merge(config.memory_dump_config_);
+ process_filter_config_.Merge(config.process_filter_config_);
event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
config.event_filters().end());
@@ -287,6 +341,7 @@ void TraceConfig::Clear() {
enable_argument_filter_ = false;
category_filter_.Clear();
memory_dump_config_.Clear();
+ process_filter_config_.Clear();
event_filters_.clear();
}
@@ -317,6 +372,7 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
category_filter_.InitializeFromConfigDict(dict);
+ process_filter_config_.InitializeFromConfigDict(dict);
const base::ListValue* category_event_filters = nullptr;
if (dict.GetList(kEventFiltersParam, &category_event_filters))
@@ -447,6 +503,10 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
+void TraceConfig::SetProcessFilterConfig(const ProcessFilterConfig& config) {
+ process_filter_config_ = config;
+}
+
void TraceConfig::SetEventFiltersFromConfigList(
const base::ListValue& category_event_filters) {
event_filters_.clear();
@@ -477,6 +537,7 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
category_filter_.ToDict(dict.get());
+ process_filter_config_.ToDict(dict.get());
if (!event_filters_.empty()) {
std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index decd54d1888..b1d809b58bf 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -88,6 +88,32 @@ class BASE_EXPORT TraceConfig {
HeapProfiler heap_profiler_options;
};
+ class BASE_EXPORT ProcessFilterConfig {
+ public:
+ ProcessFilterConfig();
+ explicit ProcessFilterConfig(
+ const std::unordered_set<base::ProcessId>& included_process_ids);
+ ProcessFilterConfig(const ProcessFilterConfig&);
+ ~ProcessFilterConfig();
+
+ bool empty() const { return included_process_ids_.empty(); }
+
+ void Clear();
+ void Merge(const ProcessFilterConfig&);
+
+ void InitializeFromConfigDict(const base::DictionaryValue&);
+ void ToDict(DictionaryValue*) const;
+
+ bool IsEnabled(base::ProcessId) const;
+
+ bool operator==(const ProcessFilterConfig& other) const {
+ return included_process_ids_ == other.included_process_ids_;
+ }
+
+ private:
+ std::unordered_set<base::ProcessId> included_process_ids_;
+ };
+
class BASE_EXPORT EventFilterConfig {
public:
EventFilterConfig(const std::string& predicate_name);
@@ -238,6 +264,11 @@ class BASE_EXPORT TraceConfig {
return memory_dump_config_;
}
+ const ProcessFilterConfig& process_filter_config() const {
+ return process_filter_config_;
+ }
+ void SetProcessFilterConfig(const ProcessFilterConfig&);
+
const EventFilters& event_filters() const { return event_filters_; }
void SetEventFilters(const EventFilters& filter_configs) {
event_filters_ = filter_configs;
@@ -279,6 +310,7 @@ class BASE_EXPORT TraceConfig {
TraceConfigCategoryFilter category_filter_;
MemoryDumpConfig memory_dump_config_;
+ ProcessFilterConfig process_filter_config_;
EventFilters event_filters_;
};
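A hedged sketch of how the new process filter might be used, based only on what the patch introduces ("included_process_ids" in the config dictionary, SetProcessFilterConfig(), and IsEnabled()); the config string and PIDs below are illustrative, not part of the patch:

#include <unordered_set>

#include "base/trace_event/trace_config.h"

void ProcessFilterSketch() {
  using base::trace_event::TraceConfig;

  // Parsed from a config dictionary: only PIDs 42 and 43 are traced.
  TraceConfig from_string(
      "{\"record_mode\":\"record-until-full\","
      "\"included_process_ids\":[42,43]}");
  bool traced = from_string.process_filter_config().IsEnabled(42);   // true
  bool skipped = from_string.process_filter_config().IsEnabled(1);   // false

  // Equivalent programmatic setup. An empty filter enables every process.
  TraceConfig programmatic;
  programmatic.SetProcessFilterConfig(
      TraceConfig::ProcessFilterConfig({42, 43}));
  (void)traced;
  (void)skipped;
}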
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
index 57608fd6412..cc49a65c2bd 100644
--- a/chromium/base/trace_event/trace_config_memory_test_util.h
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -144,32 +144,6 @@ class TraceConfigMemoryTestUtil {
"}",
MemoryDumpManager::kTraceCategory, period_ms);
}
-
- static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
- return StringPrintf(
- "{"
- "\"enable_argument_filter\":false,"
- "\"enable_systrace\":false,"
- "\"excluded_categories\":["
- "\"*\""
- "],"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"triggers\":["
- "{"
- "\"min_time_between_dumps_ms\":%d,"
- "\"mode\":\"detailed\","
- "\"type\":\"peak_memory_usage\""
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}",
- MemoryDumpManager::kTraceCategory, heavy_period);
- }
};
} // namespace trace_event
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index 3cb6d61b7d2..efdbffb6ef0 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -56,7 +56,7 @@ const char kCustomTraceConfigString[] =
"{"
"\"min_time_between_dumps_ms\":1000,"
"\"mode\":\"detailed\","
- "\"type\":\"peak_memory_usage\""
+ "\"type\":\"periodic_interval\""
"}"
"]"
"},"
@@ -634,16 +634,6 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
tc3.memory_dump_config().triggers[0].level_of_detail);
-
- std::string tc_str4 =
- TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
- 1 /*heavy_period */);
- TraceConfig tc4(tc_str4);
- EXPECT_EQ(tc_str4, tc4.ToString());
- ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
- EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
- EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc4.memory_dump_config().triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc
index 993a222c002..e6d81079322 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.cc
+++ b/chromium/base/trace_event/trace_event_etw_export_win.cc
@@ -61,21 +61,22 @@ namespace {
// group names or the hex representation. We only support the latter. Also, we
// ignore the level.
const char* const kFilteredEventGroupNames[] = {
- "benchmark", // 0x1
- "blink", // 0x2
- "browser", // 0x4
- "cc", // 0x8
- "evdev", // 0x10
- "gpu", // 0x20
- "input", // 0x40
- "netlog", // 0x80
- "sequence_manager", // 0x100
- "toplevel", // 0x200
- "v8", // 0x400
- "disabled-by-default-cc.debug", // 0x800
- "disabled-by-default-cc.debug.picture", // 0x1000
- "disabled-by-default-toplevel.flow", // 0x2000
- "startup"}; // 0x4000
+ "benchmark", // 0x1
+ "blink", // 0x2
+ "browser", // 0x4
+ "cc", // 0x8
+ "evdev", // 0x10
+ "gpu", // 0x20
+ "input", // 0x40
+ "netlog", // 0x80
+ "sequence_manager", // 0x100
+ "toplevel", // 0x200
+ "v8", // 0x400
+ "disabled-by-default-cc.debug", // 0x800
+ "disabled-by-default-cc.debug.picture", // 0x1000
+ "disabled-by-default-toplevel.flow", // 0x2000
+ "startup", // 0x4000
+ "latency"}; // 0x8000
const char kOtherEventsGroupName[] = "__OTHER_EVENTS"; // 0x2000000000000000
const char kDisabledOtherEventsGroupName[] =
"__DISABLED_OTHER_EVENTS"; // 0x4000000000000000
diff --git a/chromium/base/trace_event/trace_event_impl.h b/chromium/base/trace_event/trace_event_impl.h
index b1c67b10d46..4b4b88f5bdd 100644
--- a/chromium/base/trace_event/trace_event_impl.h
+++ b/chromium/base/trace_event/trace_event_impl.h
@@ -146,6 +146,10 @@ class BASE_EXPORT TraceEvent {
const char* arg_name(size_t index) const { return arg_names_[index]; }
const TraceValue& arg_value(size_t index) const { return arg_values_[index]; }
+ const ConvertableToTraceFormat* arg_convertible_value(size_t index) const {
+ return convertable_values_[index].get();
+ }
+
#if defined(OS_ANDROID)
void SendToATrace();
#endif
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index d5875f8b4f8..dd7b30255e1 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -47,6 +47,8 @@ const char* ObjectTypeToString(TraceEventMemoryOverhead::ObjectType type) {
return "base::Value";
case TraceEventMemoryOverhead::kTraceEventMemoryOverhead:
return "TraceEventMemoryOverhead";
+ case TraceEventMemoryOverhead::kFrameMetrics:
+ return "FrameMetrics";
case TraceEventMemoryOverhead::kLast:
NOTREACHED();
}
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.h b/chromium/base/trace_event/trace_event_memory_overhead.h
index 1587a3099f6..69468d46409 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.h
+++ b/chromium/base/trace_event/trace_event_memory_overhead.h
@@ -39,6 +39,7 @@ class BASE_EXPORT TraceEventMemoryOverhead {
kStdString,
kBaseValue,
kTraceEventMemoryOverhead,
+ kFrameMetrics,
kLast
};
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index a0d44cc30ec..4eb69584726 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -559,6 +559,8 @@ void TraceLog::GetKnownCategoryGroups(
void TraceLog::SetEnabled(const TraceConfig& trace_config,
uint8_t modes_to_enable) {
+ DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));
+
std::vector<EnabledStateObserver*> observer_list;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
{
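The new DCHECK means tracing must not be enabled in a process that the config's process filter excludes. A hedged sketch of a caller that respects this, assuming the usual TraceLog singleton API and that the TraceLog's process id matches base::GetCurrentProcId() (illustrative only):

#include "base/process/process_handle.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

void EnableTracingIfAllowed(const base::trace_event::TraceConfig& config) {
  // Only call SetEnabled() when this process passes the filter; otherwise the
  // DCHECK added in TraceLog::SetEnabled() would fire in debug builds.
  if (config.process_filter_config().IsEnabled(base::GetCurrentProcId())) {
    base::trace_event::TraceLog::GetInstance()->SetEnabled(
        config, base::trace_event::TraceLog::RECORDING_MODE);
  }
}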
diff --git a/chromium/base/win/async_operation.h b/chromium/base/win/async_operation.h
index 2c41ddf6122..97f7aab61d4 100644
--- a/chromium/base/win/async_operation.h
+++ b/chromium/base/win/async_operation.h
@@ -108,57 +108,46 @@ using LogicalT = typename ABI::Windows::Foundation::Internal::GetLogicalType<
template <typename T>
using InterfaceT = std::remove_pointer_t<AbiT<T>>;
-// Implementation of shared functionality.
-template <class T>
-class AsyncOperationBase
- : public Microsoft::WRL::RuntimeClass<
- Microsoft::WRL::RuntimeClassFlags<
- Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
- ABI::Windows::Foundation::IAsyncOperation<T>> {
- public:
- using Handler = ABI::Windows::Foundation::IAsyncOperationCompletedHandler<T>;
-
- AsyncOperationBase() = default;
- ~AsyncOperationBase() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); }
-
- // ABI::Windows::Foundation::IAsyncOperation:
- IFACEMETHODIMP put_Completed(Handler* handler) override {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- handler_ = handler;
- return S_OK;
- }
-
- IFACEMETHODIMP get_Completed(Handler** handler) override {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- return handler_.CopyTo(handler);
- }
-
- protected:
- void InvokeCompletedHandler() {
- handler_->Invoke(this, ABI::Windows::Foundation::AsyncStatus::Completed);
- }
+// Compile time switch to decide what container to use for the async results for
+// |T|. Depends on whether the underlying Abi type is a pointer to IUnknown or
+// not. It queries the internals of Windows::Foundation to obtain this
+// information.
+template <typename T>
+using ResultT =
+ std::conditional_t<std::is_convertible<AbiT<T>, IUnknown*>::value,
+ Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT<T>>>,
+ AbiT<T>>;
- THREAD_CHECKER(thread_checker_);
+template <typename T>
+using StorageT =
+ std::conditional_t<std::is_convertible<AbiT<T>, IUnknown*>::value,
+ Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT<T>>>,
+ base::Optional<AbiT<T>>>;
- private:
- Microsoft::WRL::ComPtr<Handler> handler_;
+template <typename T>
+HRESULT CopyStorage(const Microsoft::WRL::ComPtr<T>& storage, T** results) {
+ return storage.CopyTo(results);
+}
- DISALLOW_COPY_AND_ASSIGN(AsyncOperationBase);
-};
+template <typename T>
+HRESULT CopyStorage(const base::Optional<T>& storage, T* results) {
+ *results = *storage;
+ return S_OK;
+}
} // namespace internal
-template <typename T, typename Enable = void>
-class AsyncOperation;
-
-template <typename T>
-class AsyncOperation<
- T,
- std::enable_if_t<std::is_base_of<IUnknown, internal::InterfaceT<T>>::value>>
- : public internal::AsyncOperationBase<T> {
+template <class T>
+class AsyncOperation
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<
+ Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
+ ABI::Windows::Foundation::IAsyncOperation<T>> {
public:
- using InterfacePointer = Microsoft::WRL::ComPtr<internal::InterfaceT<T>>;
- using ResultCallback = base::OnceCallback<void(InterfacePointer)>;
+ using StorageT = internal::StorageT<T>;
+ using ResultT = internal::ResultT<T>;
+ using Handler = ABI::Windows::Foundation::IAsyncOperationCompletedHandler<T>;
+ using ResultCallback = base::OnceCallback<void(ResultT)>;
AsyncOperation() : weak_factory_(this) {
// Note: This can't be done in the constructor initializer list. This is
@@ -168,74 +157,49 @@ class AsyncOperation<
base::BindOnce(&AsyncOperation::OnResult, weak_factory_.GetWeakPtr());
}
- ResultCallback callback() {
- // Note: `this->` here and below is necessary due to the
- // -Wmicrosoft-template compiler warning.
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
- DCHECK(!callback_.is_null());
- return std::move(callback_);
- }
+ ~AsyncOperation() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); }
// ABI::Windows::Foundation::IAsyncOperation:
- IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
- return ptr_ ? ptr_.CopyTo(results) : E_PENDING;
+ IFACEMETHODIMP put_Completed(Handler* handler) override {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ handler_ = handler;
+ return S_OK;
}
-
- private:
- void OnResult(InterfacePointer ptr) {
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
- DCHECK(!ptr_);
- ptr_ = std::move(ptr);
- this->InvokeCompletedHandler();
+ IFACEMETHODIMP get_Completed(Handler** handler) override {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return handler_.CopyTo(handler);
}
-
- ResultCallback callback_;
- InterfacePointer ptr_;
- base::WeakPtrFactory<AsyncOperation> weak_factory_;
-};
-
-template <typename T>
-class AsyncOperation<
- T,
- std::enable_if_t<
- !std::is_base_of<IUnknown, internal::InterfaceT<T>>::value>>
- : public internal::AsyncOperationBase<T> {
- public:
- using ResultCallback = base::OnceCallback<void(T)>;
-
- AsyncOperation() : weak_factory_(this) {
- callback_ =
- base::BindOnce(&AsyncOperation::OnResult, weak_factory_.GetWeakPtr());
+ IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return storage_ ? internal::CopyStorage(storage_, results) : E_PENDING;
}
ResultCallback callback() {
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(!callback_.is_null());
return std::move(callback_);
}
- // ABI::Windows::Foundation::IAsyncOperation:
- IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
- if (!value_)
- return E_PENDING;
-
- *results = *value_;
- return S_OK;
+ private:
+ void InvokeCompletedHandler() {
+ handler_->Invoke(this, ABI::Windows::Foundation::AsyncStatus::Completed);
}
- private:
- void OnResult(T result) {
- DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
- DCHECK(!value_);
- value_.emplace(std::move(result));
- this->InvokeCompletedHandler();
+ void OnResult(ResultT result) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!storage_);
+ storage_ = std::move(result);
+ InvokeCompletedHandler();
}
ResultCallback callback_;
- base::Optional<T> value_;
+ Microsoft::WRL::ComPtr<Handler> handler_;
+ StorageT storage_;
+
+ THREAD_CHECKER(thread_checker_);
base::WeakPtrFactory<AsyncOperation> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncOperation);
};
} // namespace win
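A hedged usage sketch of the unified AsyncOperation, using only the members visible above (put_Completed(), callback(), GetResults()). It assumes the IAsyncOperation<int> and IAsyncOperationCompletedHandler<int> ABI specializations required by the WinRT templates are declared in the translation unit; the function name is illustrative and this is not part of the patch:

#include <windows.foundation.h>
#include <wrl/client.h>
#include <wrl/event.h>

#include <utility>

#include "base/win/async_operation.h"

void AsyncOperationSketch() {
  using ABI::Windows::Foundation::AsyncStatus;
  using ABI::Windows::Foundation::IAsyncOperation;
  using ABI::Windows::Foundation::IAsyncOperationCompletedHandler;

  auto op = Microsoft::WRL::Make<base::win::AsyncOperation<int>>();

  // Register a completion handler first: OnResult() invokes it unconditionally
  // once the callback below runs.
  auto handler = Microsoft::WRL::Callback<IAsyncOperationCompletedHandler<int>>(
      [](IAsyncOperation<int>* /*op*/, AsyncStatus /*status*/) { return S_OK; });
  op->put_Completed(handler.Get());

  int result = 0;
  HRESULT hr = op->GetResults(&result);  // E_PENDING: nothing stored yet.

  // The producer side runs the one-shot callback; this stores the value and
  // fires the completion handler.
  base::win::AsyncOperation<int>::ResultCallback cb = op->callback();
  std::move(cb).Run(42);

  hr = op->GetResults(&result);  // S_OK, result == 42.
  (void)hr;
}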
diff --git a/chromium/base/win/reference.h b/chromium/base/win/reference.h
new file mode 100644
index 00000000000..7fadfb267f7
--- /dev/null
+++ b/chromium/base/win/reference.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_REFERENCE_H_
+#define BASE_WIN_REFERENCE_H_
+
+#include <windows.foundation.collections.h>
+#include <wrl/implements.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Implementation of the UWP's IReference interface.
+template <typename T>
+class Reference
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<
+ Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
+ ABI::Windows::Foundation::IReference<T>> {
+ public:
+ using AbiT = typename ABI::Windows::Foundation::Internal::GetAbiType<
+ typename ABI::Windows::Foundation::IReference<T>::T_complex>::type;
+
+ explicit Reference(const AbiT& value) : value_(value) {}
+ explicit Reference(AbiT&& value) : value_(std::move(value)) {}
+
+ // ABI::Windows::Foundation::IReference:
+ IFACEMETHODIMP get_Value(AbiT* value) override {
+ *value = value_;
+ return S_OK;
+ }
+
+ private:
+ ~Reference() = default;
+ AbiT value_;
+
+ DISALLOW_COPY_AND_ASSIGN(Reference);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_REFERENCE_H_
diff --git a/chromium/base/win/reference_unittest.cc b/chromium/base/win/reference_unittest.cc
new file mode 100644
index 00000000000..4116872cce6
--- /dev/null
+++ b/chromium/base/win/reference_unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/reference.h"
+
+#include <windows.foundation.h>
+#include <wrl/client.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+using Microsoft::WRL::Make;
+
+} // namespace
+
+TEST(ReferenceTest, Value) {
+ auto ref = Make<Reference<int>>(123);
+ int value = 0;
+ HRESULT hr = ref->get_Value(&value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(123, value);
+}
+
+TEST(ReferenceTest, ValueAggregate) {
+ auto ref = Make<Reference<bool>>(true);
+ boolean value = false;
+ HRESULT hr = ref->get_Value(&value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_TRUE(value);
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/vector.cc b/chromium/base/win/vector.cc
new file mode 100644
index 00000000000..3107832730a
--- /dev/null
+++ b/chromium/base/win/vector.cc
@@ -0,0 +1,24 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/vector.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+HRESULT VectorChangedEventArgs::get_CollectionChange(
+ ABI::Windows::Foundation::Collections::CollectionChange* value) {
+ *value = change_;
+ return S_OK;
+}
+
+HRESULT VectorChangedEventArgs::get_Index(unsigned int* value) {
+ *value = index_;
+ return S_OK;
+}
+
+} // namespace internal
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/vector.h b/chromium/base/win/vector.h
new file mode 100644
index 00000000000..aca6c4e245d
--- /dev/null
+++ b/chromium/base/win/vector.h
@@ -0,0 +1,366 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_VECTOR_H_
+#define BASE_WIN_VECTOR_H_
+
+#include <ivectorchangedeventargs.h>
+#include <windows.foundation.collections.h>
+#include <wrl/implements.h>
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/flat_map.h"
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+template <typename T>
+class Vector;
+
+namespace internal {
+
+template <typename T>
+using Complex =
+ typename ABI::Windows::Foundation::Collections::IVector<T>::T_complex;
+
+template <typename T>
+using Logical = typename ABI::Windows::Foundation::Internal::GetLogicalType<
+ Complex<T>>::type;
+
+template <typename T>
+using Abi =
+ typename ABI::Windows::Foundation::Internal::GetAbiType<Complex<T>>::type;
+
+class BASE_EXPORT VectorChangedEventArgs
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<
+ Microsoft::WRL::WinRtClassicComMix |
+ Microsoft::WRL::InhibitRoOriginateError>,
+ ABI::Windows::Foundation::Collections::IVectorChangedEventArgs> {
+ public:
+ VectorChangedEventArgs(
+ ABI::Windows::Foundation::Collections::CollectionChange change,
+ unsigned int index)
+ : change_(change), index_(index) {}
+
+ ~VectorChangedEventArgs() override = default;
+
+ // ABI::Windows::Foundation::Collections::IVectorChangedEventArgs:
+ IFACEMETHODIMP get_CollectionChange(
+ ABI::Windows::Foundation::Collections::CollectionChange* value) override;
+ IFACEMETHODIMP get_Index(unsigned int* value) override;
+
+ private:
+ const ABI::Windows::Foundation::Collections::CollectionChange change_;
+ const unsigned int index_;
+};
+
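+// A read-only snapshot view over a Vector. The view registers itself as a
+// VectorChanged handler on construction; as soon as the underlying Vector is
+// mutated the view detaches, and every subsequent call on it returns
+// E_CHANGED_STATE.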
+template <typename T>
+class VectorView
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<
+ Microsoft::WRL::WinRtClassicComMix |
+ Microsoft::WRL::InhibitRoOriginateError>,
+ ABI::Windows::Foundation::Collections::IVectorView<Logical<T>>,
+ ABI::Windows::Foundation::Collections::VectorChangedEventHandler<
+ Logical<T>>> {
+ public:
+ using LogicalT = Logical<T>;
+ using AbiT = Abi<T>;
+
+ explicit VectorView(Microsoft::WRL::ComPtr<Vector<LogicalT>> vector)
+ : vector_(std::move(vector)) {
+ vector_->add_VectorChanged(this, &vector_changed_token_);
+ }
+
+ ~VectorView() {
+ if (vector_)
+ vector_->remove_VectorChanged(vector_changed_token_);
+ }
+
+ // ABI::Windows::Foundation::Collections::IVectorView:
+ IFACEMETHODIMP GetAt(unsigned index, AbiT* item) override {
+ return vector_ ? vector_->GetAt(index, item) : E_CHANGED_STATE;
+ }
+
+ IFACEMETHODIMP get_Size(unsigned* size) override {
+ return vector_ ? vector_->get_Size(size) : E_CHANGED_STATE;
+ }
+
+ IFACEMETHODIMP IndexOf(AbiT value, unsigned* index, boolean* found) override {
+ return vector_ ? vector_->IndexOf(std::move(value), index, found)
+ : E_CHANGED_STATE;
+ }
+
+ IFACEMETHODIMP GetMany(unsigned start_index,
+ unsigned capacity,
+ AbiT* value,
+ unsigned* actual) override {
+ return vector_ ? vector_->GetMany(start_index, capacity, value, actual)
+ : E_CHANGED_STATE;
+ }
+
+ // ABI::Windows::Foundation::Collections::VectorChangedEventHandler:
+ IFACEMETHODIMP Invoke(
+ ABI::Windows::Foundation::Collections::IObservableVector<LogicalT>*
+ sender,
+ ABI::Windows::Foundation::Collections::IVectorChangedEventArgs* e)
+ override {
+ DCHECK_EQ(vector_.Get(), sender);
+ vector_.Reset();
+ sender->remove_VectorChanged(vector_changed_token_);
+ return S_OK;
+ }
+
+ private:
+ Microsoft::WRL::ComPtr<Vector<LogicalT>> vector_;
+ EventRegistrationToken vector_changed_token_;
+};
+
+template <typename T>
+HRESULT CopyTo(const T& value, T* ptr) {
+ *ptr = value;
+ return S_OK;
+}
+
+template <typename T>
+HRESULT CopyTo(const Microsoft::WRL::ComPtr<T>& com_ptr, T** ptr) {
+ return com_ptr.CopyTo(ptr);
+}
+
+template <typename T>
+HRESULT CopyN(typename std::vector<T>::const_iterator first,
+ unsigned count,
+ T* result) {
+ std::copy_n(first, count, result);
+ return S_OK;
+}
+
+template <typename T>
+HRESULT CopyN(
+ typename std::vector<Microsoft::WRL::ComPtr<T>>::const_iterator first,
+ unsigned count,
+ T** result) {
+ for (unsigned i = 0; i < count; ++i)
+ CopyTo(*first++, result++);
+ return S_OK;
+}
+
+template <typename T>
+bool IsEqual(const T& lhs, const T& rhs) {
+ return lhs == rhs;
+}
+
+template <typename T>
+bool IsEqual(const Microsoft::WRL::ComPtr<T>& com_ptr, const T* ptr) {
+ return com_ptr.Get() == ptr;
+}
+
+} // namespace internal
+
+// This file provides an implementation of
+// Windows::Foundation::Collections::IVector. It functions as a thin wrapper
+// around a std::vector and dispatches method calls to either the
+// corresponding std::vector API or to the appropriate standard algorithms.
+// Furthermore, it notifies its observers whenever its observable state
+// changes. A base::win::Vector can be constructed for any type T and is
+// explicitly constructible from a std::vector. In the case where T is a
+// pointer to a type derived from IUnknown, the std::vector's elements need to
+// be of the corresponding Microsoft::WRL::ComPtr<> type. This enforces proper
+// reference counting and improves safety.
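+//
+// A minimal usage sketch (mirroring vector_unittest.cc; the concrete values
+// are illustrative only):
+//
+//   auto vec = Microsoft::WRL::Make<base::win::Vector<int>>();
+//   vec->Append(1);
+//   unsigned size = 0;
+//   vec->get_Size(&size);   // size == 1
+//   int value = 0;
+//   vec->GetAt(0, &value);  // value == 1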
+template <typename T>
+class Vector
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<
+ Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
+ ABI::Windows::Foundation::Collections::IVector<internal::Logical<T>>,
+ ABI::Windows::Foundation::Collections::IObservableVector<
+ internal::Logical<T>>> {
+ public:
+ // windows.foundation.collections.h defines the following template and
+ // semantics in Windows::Foundation::Internal:
+ //
+ // template <class LogicalType, class AbiType>
+ // struct AggregateType;
+ //
+ // LogicalType - the Windows Runtime type (eg, runtime class, interface
+ // group, etc) being provided as an argument to an _impl
+ // template, when that type cannot be represented at the ABI.
+ // AbiType - the type used for marshalling, ie "at the ABI", for the
+ // logical type.
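+  //
+  // For example (see the aggregate-type tests in vector_unittest.cc), for
+  // T = ABI::Windows::Foundation::Uri*, LogicalT is Uri* while AbiT is
+  // IUriRuntimeClass*; for a plain value type such as int, both are int.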
+ using LogicalT = internal::Logical<T>;
+ using AbiT = internal::Abi<T>;
+
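+  // COM interface pointers are stored as Microsoft::WRL::ComPtr<> elements so
+  // that reference counting is handled automatically; all other types are
+  // stored by value.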
+ using StorageT =
+ std::conditional_t<std::is_convertible<AbiT, IUnknown*>::value,
+ Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT>>,
+ AbiT>;
+
+ Vector() = default;
+ explicit Vector(const std::vector<StorageT>& vector) : vector_(vector) {}
+ explicit Vector(std::vector<StorageT>&& vector)
+ : vector_(std::move(vector)) {}
+
+ // ABI::Windows::Foundation::Collections::IVector:
+ IFACEMETHODIMP GetAt(unsigned index, AbiT* item) override {
+ if (index >= vector_.size())
+ return E_BOUNDS;
+ return internal::CopyTo(vector_[index], item);
+ }
+
+ IFACEMETHODIMP get_Size(unsigned* size) override {
+ *size = vector_.size();
+ return S_OK;
+ }
+
+ IFACEMETHODIMP GetView(
+ ABI::Windows::Foundation::Collections::IVectorView<LogicalT>** view)
+ override {
+ return Microsoft::WRL::Make<internal::VectorView<LogicalT>>(this).CopyTo(
+ view);
+ }
+
+ IFACEMETHODIMP IndexOf(AbiT value, unsigned* index, boolean* found) override {
+ auto iter = std::find_if(vector_.begin(), vector_.end(),
+ [&value](const StorageT& elem) {
+ return internal::IsEqual(elem, value);
+ });
+ *index = iter != vector_.end() ? std::distance(vector_.begin(), iter) : 0;
+ *found = iter != vector_.end();
+ return S_OK;
+ }
+
+ IFACEMETHODIMP SetAt(unsigned index, AbiT item) override {
+ if (index >= vector_.size())
+ return E_BOUNDS;
+
+ vector_[index] = std::move(item);
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_ItemChanged,
+ index);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP InsertAt(unsigned index, AbiT item) override {
+ if (index > vector_.size())
+ return E_BOUNDS;
+
+ vector_.insert(std::next(vector_.begin(), index), std::move(item));
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_ItemInserted,
+ index);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP RemoveAt(unsigned index) override {
+ if (index >= vector_.size())
+ return E_BOUNDS;
+
+ vector_.erase(std::next(vector_.begin(), index));
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_ItemRemoved,
+ index);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP Append(AbiT item) override {
+ vector_.push_back(std::move(item));
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_ItemInserted,
+ vector_.size() - 1);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP RemoveAtEnd() override {
+ if (vector_.empty())
+ return E_BOUNDS;
+
+ vector_.pop_back();
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_ItemRemoved,
+ vector_.size());
+ return S_OK;
+ }
+
+ IFACEMETHODIMP Clear() override {
+ vector_.clear();
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_Reset, 0);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP GetMany(unsigned start_index,
+ unsigned capacity,
+ AbiT* value,
+ unsigned* actual) override {
+ if (start_index > vector_.size())
+ return E_BOUNDS;
+
+ *actual = std::min<unsigned>(vector_.size() - start_index, capacity);
+ return internal::CopyN(std::next(vector_.begin(), start_index), *actual,
+ value);
+ }
+
+ IFACEMETHODIMP ReplaceAll(unsigned count, AbiT* value) override {
+ vector_.assign(value, std::next(value, count));
+ NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange_Reset, 0);
+ return S_OK;
+ }
+
+ // ABI::Windows::Foundation::Collections::IObservableVector:
+ IFACEMETHODIMP add_VectorChanged(
+ ABI::Windows::Foundation::Collections::VectorChangedEventHandler<
+ LogicalT>* handler,
+ EventRegistrationToken* token) override {
+ token->value = handler_id_++;
+ handlers_.emplace_hint(handlers_.end(), token->value, handler);
+ return S_OK;
+ }
+
+ IFACEMETHODIMP remove_VectorChanged(EventRegistrationToken token) override {
+ handlers_.erase(token.value);
+ return S_OK;
+ }
+
+ void NotifyVectorChanged(
+ ABI::Windows::Foundation::Collections::CollectionChange change,
+ unsigned int index) {
+ auto args =
+ Microsoft::WRL::Make<internal::VectorChangedEventArgs>(change, index);
+
+ // Invoking the handlers could result in mutations to the map, thus we make
+ // a copy beforehand.
+ auto handlers = handlers_;
+ for (auto& handler : handlers)
+ handler.second->Invoke(this, args.Get());
+ }
+
+  const std::vector<StorageT>& vector_for_testing() { return vector_; }
+
+ private:
+ ~Vector() override {
+    // Handlers should not outlive the Vector. Furthermore, they must ensure
+    // they are unregistered before the handler is destroyed. This implies
+    // there should be no handlers left when the Vector is destroyed.
+ DCHECK(handlers_.empty());
+ }
+
+ std::vector<StorageT> vector_;
+ base::flat_map<int64_t,
+ ABI::Windows::Foundation::Collections::
+ VectorChangedEventHandler<LogicalT>*>
+ handlers_;
+ int64_t handler_id_ = 0;
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_VECTOR_H_
diff --git a/chromium/base/win/vector_unittest.cc b/chromium/base/win/vector_unittest.cc
new file mode 100644
index 00000000000..33d0713f24a
--- /dev/null
+++ b/chromium/base/win/vector_unittest.cc
@@ -0,0 +1,638 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/vector.h"
+
+#include <windows.foundation.h>
+#include <wrl/client.h>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace ABI {
+namespace Windows {
+namespace Foundation {
+namespace Collections {
+
+// Note: As UWP does not provide int specializations for IObservableVector and
+// VectorChangedEventHandler, we need to supply our own. UUIDs were generated
+// using `uuidgen`.
+template <>
+struct __declspec(uuid("21c2c195-91a4-4fce-8346-2a85f4478e26"))
+ IObservableVector<int> : IObservableVector_impl<int> {};
+
+template <>
+struct __declspec(uuid("86b0071e-5e72-4d3d-82d3-420ebd2b2716"))
+ VectorChangedEventHandler<int> : VectorChangedEventHandler_impl<int> {};
+
+namespace {
+using UriPtrAggregate = Internal::AggregateType<Uri*, IUriRuntimeClass*>;
+}
+
+template <>
+struct __declspec(uuid("12311764-f245-4245-9dc9-bab258eddd4e"))
+ IObservableVector<Uri*> : IObservableVector_impl<UriPtrAggregate> {};
+
+template <>
+struct __declspec(uuid("050e4b78-71b2-43ff-bf7c-f6ba589aced9"))
+ VectorChangedEventHandler<Uri*>
+ : VectorChangedEventHandler_impl<UriPtrAggregate> {};
+
+} // namespace Collections
+} // namespace Foundation
+} // namespace Windows
+} // namespace ABI
+
+namespace base {
+namespace win {
+
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ABI::Windows::Foundation::Uri;
+using ABI::Windows::Foundation::Collections::CollectionChange;
+using ABI::Windows::Foundation::Collections::CollectionChange_ItemChanged;
+using ABI::Windows::Foundation::Collections::CollectionChange_ItemInserted;
+using ABI::Windows::Foundation::Collections::CollectionChange_ItemRemoved;
+using ABI::Windows::Foundation::Collections::CollectionChange_Reset;
+using ABI::Windows::Foundation::Collections::IObservableVector;
+using ABI::Windows::Foundation::Collections::IVectorChangedEventArgs;
+using ABI::Windows::Foundation::Collections::IVectorView;
+using ABI::Windows::Foundation::Collections::VectorChangedEventHandler;
+using ABI::Windows::Foundation::IAsyncOperation;
+using Microsoft::WRL::ClassicCom;
+using Microsoft::WRL::ComPtr;
+using Microsoft::WRL::InhibitRoOriginateError;
+using Microsoft::WRL::Make;
+using Microsoft::WRL::RuntimeClass;
+using Microsoft::WRL::RuntimeClassFlags;
+
+template <typename T>
+class FakeVectorChangedEventHandler
+ : public RuntimeClass<
+ RuntimeClassFlags<ClassicCom | InhibitRoOriginateError>,
+ VectorChangedEventHandler<T>> {
+ public:
+ explicit FakeVectorChangedEventHandler(ComPtr<IObservableVector<T>> vector)
+ : vector_(std::move(vector)) {
+ EXPECT_TRUE(SUCCEEDED(vector_->add_VectorChanged(this, &token_)));
+ }
+
+ ~FakeVectorChangedEventHandler() {
+ EXPECT_TRUE(SUCCEEDED(vector_->remove_VectorChanged(token_)));
+ }
+
+ // VectorChangedEventHandler:
+ IFACEMETHODIMP Invoke(IObservableVector<T>* sender,
+ IVectorChangedEventArgs* e) {
+ sender_ = sender;
+ EXPECT_TRUE(SUCCEEDED(e->get_CollectionChange(&change_)));
+ EXPECT_TRUE(SUCCEEDED(e->get_Index(&index_)));
+ return S_OK;
+ }
+
+ IObservableVector<T>* sender() { return sender_; }
+ CollectionChange change() { return change_; }
+ unsigned int index() { return index_; }
+
+ private:
+ ComPtr<IObservableVector<T>> vector_;
+ EventRegistrationToken token_;
+ IObservableVector<T>* sender_ = nullptr;
+ CollectionChange change_ = CollectionChange_Reset;
+ unsigned int index_ = 0;
+};
+
+// The ReplaceAll test requires a non-const data() member, thus these vectors
+// are not declared const, even though no test mutates them.
+std::vector<int> g_empty;
+std::vector<int> g_one = {1};
+std::vector<int> g_one_two = {1, 2};
+std::vector<int> g_one_two_three = {1, 2, 3};
+
+} // namespace
+
+TEST(VectorTest, GetAt_Empty) {
+ auto vec = Make<Vector<int>>();
+ int value;
+ HRESULT hr = vec->GetAt(0, &value);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, GetAt_One) {
+ auto vec = Make<Vector<int>>(g_one);
+ int value;
+ HRESULT hr = vec->GetAt(0, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1, value);
+
+ hr = vec->GetAt(1, &value);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, GetAt_OneTwo) {
+ auto vec = Make<Vector<int>>(g_one_two);
+ int value;
+ HRESULT hr = vec->GetAt(0, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1, value);
+
+ hr = vec->GetAt(1, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2, value);
+
+ hr = vec->GetAt(2, &value);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, GetAt_OneTwoThree) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ int value;
+ HRESULT hr = vec->GetAt(0, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1, value);
+
+ hr = vec->GetAt(1, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2, value);
+
+ hr = vec->GetAt(2, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3, value);
+
+ hr = vec->GetAt(3, &value);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, get_Size_Empty) {
+ auto vec = Make<Vector<int>>();
+ unsigned size;
+ HRESULT hr = vec->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, size);
+}
+
+TEST(VectorTest, get_Size_One) {
+ auto vec = Make<Vector<int>>(g_one);
+ unsigned size;
+ HRESULT hr = vec->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, size);
+}
+
+TEST(VectorTest, get_Size_OneTwo) {
+ auto vec = Make<Vector<int>>(g_one_two);
+ unsigned size;
+ HRESULT hr = vec->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, size);
+}
+
+TEST(VectorTest, get_Size_OneTwoThree) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ unsigned size;
+ HRESULT hr = vec->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3u, size);
+}
+
+TEST(VectorTest, GetView) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ ComPtr<IVectorView<int>> view;
+ HRESULT hr = vec->GetView(&view);
+ EXPECT_TRUE(SUCCEEDED(hr));
+
+ int value;
+ hr = view->GetAt(0, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1, value);
+
+ hr = view->GetAt(1, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2, value);
+
+ hr = view->GetAt(2, &value);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3, value);
+
+ hr = view->GetAt(3, &value);
+ EXPECT_EQ(E_BOUNDS, hr);
+
+ unsigned size;
+ hr = view->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3u, size);
+
+ unsigned index;
+ boolean found;
+ hr = view->IndexOf(1, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_TRUE(found);
+
+ hr = view->IndexOf(2, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, index);
+ EXPECT_TRUE(found);
+
+ hr = view->IndexOf(3, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, index);
+ EXPECT_TRUE(found);
+
+ hr = view->IndexOf(4, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_FALSE(found);
+
+ std::vector<int> copy(3);
+ unsigned actual;
+ hr = view->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3u, actual);
+ EXPECT_THAT(copy, ElementsAre(1, 2, 3));
+
+ hr = vec->Append(4);
+ EXPECT_TRUE(SUCCEEDED(hr));
+
+ // The view is supposed to be a snapshot of the vector when it's created.
+ // Further modifications to the vector will invalidate the view.
+ hr = view->GetAt(3, &value);
+ EXPECT_EQ(E_CHANGED_STATE, hr);
+
+ hr = view->get_Size(&size);
+ EXPECT_EQ(E_CHANGED_STATE, hr);
+
+ hr = view->IndexOf(1, &index, &found);
+ EXPECT_EQ(E_CHANGED_STATE, hr);
+
+ hr = view->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_EQ(E_CHANGED_STATE, hr);
+}
+
+TEST(VectorTest, IndexOf_Empty) {
+ auto vec = Make<Vector<int>>();
+ unsigned index;
+ boolean found;
+ HRESULT hr = vec->IndexOf(1, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_FALSE(found);
+}
+
+TEST(VectorTest, IndexOf_One) {
+ auto vec = Make<Vector<int>>(g_one);
+ unsigned index;
+ boolean found;
+
+ HRESULT hr = vec->IndexOf(1, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(2, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_FALSE(found);
+}
+
+TEST(VectorTest, IndexOf_OneTwo) {
+ auto vec = Make<Vector<int>>(g_one_two);
+ unsigned index;
+ boolean found;
+
+ HRESULT hr = vec->IndexOf(1, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(2, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(3, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_FALSE(found);
+}
+
+TEST(VectorTest, IndexOf_OneTwoThree) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ unsigned index;
+ boolean found;
+
+ HRESULT hr = vec->IndexOf(1, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(2, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(3, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, index);
+ EXPECT_TRUE(found);
+
+ hr = vec->IndexOf(4, &index, &found);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, index);
+ EXPECT_FALSE(found);
+}
+
+TEST(VectorTest, SetAt) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+
+ HRESULT hr = vec->SetAt(0, 4);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(4, 2, 3));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemChanged, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->SetAt(1, 5);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(4, 5, 3));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemChanged, handler->change());
+ EXPECT_EQ(1u, handler->index());
+
+ hr = vec->SetAt(2, 6);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(4, 5, 6));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemChanged, handler->change());
+ EXPECT_EQ(2u, handler->index());
+
+ hr = vec->SetAt(3, 7);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, InsertAt) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->InsertAt(4, 4);
+ EXPECT_EQ(E_BOUNDS, hr);
+
+ hr = vec->InsertAt(3, 4);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2, 3, 4));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(3u, handler->index());
+
+ hr = vec->InsertAt(2, 5);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2, 5, 3, 4));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(2u, handler->index());
+
+ hr = vec->InsertAt(1, 6);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 6, 2, 5, 3, 4));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(1u, handler->index());
+
+ hr = vec->InsertAt(0, 7);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(7, 1, 6, 2, 5, 3, 4));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(0u, handler->index());
+}
+
+TEST(VectorTest, RemoveAt) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->RemoveAt(3);
+ EXPECT_EQ(E_BOUNDS, hr);
+
+ hr = vec->RemoveAt(2);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(2u, handler->index());
+
+ hr = vec->RemoveAt(2);
+ EXPECT_EQ(E_BOUNDS, hr);
+
+ hr = vec->RemoveAt(1);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(1u, handler->index());
+
+ hr = vec->RemoveAt(1);
+ EXPECT_EQ(E_BOUNDS, hr);
+
+ hr = vec->RemoveAt(0);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), IsEmpty());
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->RemoveAt(0);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, Append) {
+ auto vec = Make<Vector<int>>();
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->Append(1);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->Append(2);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(1u, handler->index());
+
+ hr = vec->Append(3);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2, 3));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemInserted, handler->change());
+ EXPECT_EQ(2u, handler->index());
+}
+
+TEST(VectorTest, RemoveAtEnd) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->RemoveAtEnd();
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(2u, handler->index());
+
+ hr = vec->RemoveAtEnd();
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(1u, handler->index());
+
+ hr = vec->RemoveAtEnd();
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), IsEmpty());
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_ItemRemoved, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->RemoveAtEnd();
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, Clear) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->Clear();
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), IsEmpty());
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_Reset, handler->change());
+ EXPECT_EQ(0u, handler->index());
+}
+
+TEST(VectorTest, GetMany) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ std::vector<int> copy;
+ unsigned actual;
+ HRESULT hr = vec->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, actual);
+ EXPECT_THAT(copy, IsEmpty());
+
+ copy.resize(1);
+ hr = vec->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, actual);
+ EXPECT_THAT(copy, ElementsAre(1));
+
+ copy.resize(2);
+ hr = vec->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, actual);
+ EXPECT_THAT(copy, ElementsAre(1, 2));
+
+ copy.resize(3);
+ hr = vec->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3u, actual);
+ EXPECT_THAT(copy, ElementsAre(1, 2, 3));
+
+ copy.resize(4);
+ hr = vec->GetMany(0, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(3u, actual);
+ EXPECT_THAT(copy, ElementsAre(1, 2, 3, 0));
+
+ copy.resize(0);
+ hr = vec->GetMany(1, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, actual);
+ EXPECT_THAT(copy, IsEmpty());
+
+ copy.resize(1);
+ hr = vec->GetMany(1, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, actual);
+ EXPECT_THAT(copy, ElementsAre(2));
+
+ copy.resize(2);
+ hr = vec->GetMany(1, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, actual);
+ EXPECT_THAT(copy, ElementsAre(2, 3));
+
+ copy.resize(3);
+ hr = vec->GetMany(1, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(2u, actual);
+ EXPECT_THAT(copy, ElementsAre(2, 3, 0));
+
+ copy.resize(0);
+ hr = vec->GetMany(2, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, actual);
+ EXPECT_THAT(copy, IsEmpty());
+
+ copy.resize(1);
+ hr = vec->GetMany(2, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, actual);
+ EXPECT_THAT(copy, ElementsAre(3));
+
+ copy.resize(2);
+ hr = vec->GetMany(2, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(1u, actual);
+ EXPECT_THAT(copy, ElementsAre(3, 0));
+
+ hr = vec->GetMany(3, copy.size(), copy.data(), &actual);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, actual);
+
+ hr = vec->GetMany(4, copy.size(), copy.data(), &actual);
+ EXPECT_EQ(E_BOUNDS, hr);
+}
+
+TEST(VectorTest, ReplaceAll) {
+ auto vec = Make<Vector<int>>(g_one_two_three);
+ auto handler = Make<FakeVectorChangedEventHandler<int>>(vec.Get());
+ HRESULT hr = vec->ReplaceAll(g_empty.size(), g_empty.data());
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), IsEmpty());
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_Reset, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->ReplaceAll(g_one.size(), g_one.data());
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_Reset, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->ReplaceAll(g_one_two.size(), g_one_two.data());
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_Reset, handler->change());
+ EXPECT_EQ(0u, handler->index());
+
+ hr = vec->ReplaceAll(g_one_two_three.size(), g_one_two_three.data());
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_THAT(vec->vector_for_testing(), ElementsAre(1, 2, 3));
+ EXPECT_EQ(vec.Get(), handler->sender());
+ EXPECT_EQ(CollectionChange_Reset, handler->change());
+ EXPECT_EQ(0u, handler->index());
+}
+
+// Uri* is an AggregateType whose ABI representation is IUriRuntimeClass*.
+TEST(VectorTest, ConstructWithAggregateType) {
+ auto vec = Make<Vector<Uri*>>();
+ unsigned size;
+ HRESULT hr = vec->get_Size(&size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_EQ(0u, size);
+}
+
+} // namespace win
+} // namespace base